Each sample row below lists the following fields in order, one field per block, separated by `|` lines:

| Column | Type | Length / values |
|---|---|---|
| commit | string | 40 chars (fixed) |
| old_file | string | 4 to 118 chars |
| new_file | string | 4 to 118 chars |
| old_contents | string | 0 to 2.94k chars |
| new_contents | string | 1 to 4.43k chars |
| subject | string | 15 to 444 chars |
| message | string | 16 to 3.45k chars |
| lang | string (class label) | 1 distinct value |
| license | string (class label) | 13 distinct values |
| repos | string | 5 to 43.2k chars |
| prompt | string | 17 to 4.58k chars |
| response | string | 1 to 4.43k chars |
| prompt_tagged | string | 58 to 4.62k chars |
| response_tagged | string | 1 to 4.43k chars |
| text | string | 132 to 7.29k chars |
| text_tagged | string | 173 to 7.33k chars |
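Before the rows, a quick note on access. Assuming this preview comes from a Hugging Face `datasets` table (an assumption based on the stringlengths/stringclasses summary above), a single row could be loaded and inspected roughly as in the sketch below; the dataset id is a placeholder, not the real repository name, and only the column names from the schema table are taken from the source.

```python
# Minimal sketch, assuming the table is served as a Hugging Face dataset.
# "your-org/commit-dataset" is a placeholder id, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("your-org/commit-dataset", split="train")

row = ds[0]
print(row["commit"])              # 40-char commit hash
print(row["subject"])             # one-line commit subject
print(row["new_file"])            # file path after the commit
print(row["new_contents"][:200])  # start of the post-commit file contents
```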
80c749e8b8395f20305c04c480fbf39400f1b5a4
|
features/tests/test_index.py
|
features/tests/test_index.py
|
from django.test import TestCase
class TestIndex(TestCase):
"""Verify the index page is served properly"""
def test_root(self):
# Fetch page from '/'
reponse = self.client.get('/')
# Should respond OK
self.assertEqual(reponse.status_code, 200)
# Should be rendered from a particular template
self.assertEqual(reponse.templates[0].name, 'features/index.html')
def test_only_root(self):
"""Should only be served from /"""
# Fetch from path that shouldn't exist
response = self.client.get('/extrastuff')
# Should 404 out
self.assertEqual(response.status_code, 404)
# Fetch from path that may exist
response = self.client.get('/api/')
# Make sure it's not the index if it does exist
if response.status_code == 200:
if response.templates:
self.assertNotEqual(response.templates[0].name, 'features/index.html')
|
Add failing test for index route
|
Add failing test for index route
Currently the index route gloms every other route.
|
Python
|
mit
|
wkevina/feature-requests-app,wkevina/feature-requests-app,wkevina/feature-requests-app
|
Add failing test for index route
Currently the index route gloms every other route.
|
from django.test import TestCase
class TestIndex(TestCase):
"""Verify the index page is served properly"""
def test_root(self):
# Fetch page from '/'
reponse = self.client.get('/')
# Should respond OK
self.assertEqual(reponse.status_code, 200)
# Should be rendered from a particular template
self.assertEqual(reponse.templates[0].name, 'features/index.html')
def test_only_root(self):
"""Should only be served from /"""
# Fetch from path that shouldn't exist
response = self.client.get('/extrastuff')
# Should 404 out
self.assertEqual(response.status_code, 404)
# Fetch from path that may exist
response = self.client.get('/api/')
# Make sure it's not the index if it does exist
if response.status_code == 200:
if response.templates:
self.assertNotEqual(response.templates[0].name, 'features/index.html')
|
<commit_before><commit_msg>Add failing test for index route
Currently the index route gloms every other route.<commit_after>
|
from django.test import TestCase
class TestIndex(TestCase):
"""Verify the index page is served properly"""
def test_root(self):
# Fetch page from '/'
reponse = self.client.get('/')
# Should respond OK
self.assertEqual(reponse.status_code, 200)
# Should be rendered from a particular template
self.assertEqual(reponse.templates[0].name, 'features/index.html')
def test_only_root(self):
"""Should only be served from /"""
# Fetch from path that shouldn't exist
response = self.client.get('/extrastuff')
# Should 404 out
self.assertEqual(response.status_code, 404)
# Fetch from path that may exist
response = self.client.get('/api/')
# Make sure it's not the index if it does exist
if response.status_code == 200:
if response.templates:
self.assertNotEqual(response.templates[0].name, 'features/index.html')
|
Add failing test for index route
Currently the index route gloms every other route.from django.test import TestCase
class TestIndex(TestCase):
"""Verify the index page is served properly"""
def test_root(self):
# Fetch page from '/'
reponse = self.client.get('/')
# Should respond OK
self.assertEqual(reponse.status_code, 200)
# Should be rendered from a particular template
self.assertEqual(reponse.templates[0].name, 'features/index.html')
def test_only_root(self):
"""Should only be served from /"""
# Fetch from path that shouldn't exist
response = self.client.get('/extrastuff')
# Should 404 out
self.assertEqual(response.status_code, 404)
# Fetch from path that may exist
response = self.client.get('/api/')
# Make sure it's not the index if it does exist
if response.status_code == 200:
if response.templates:
self.assertNotEqual(response.templates[0].name, 'features/index.html')
|
<commit_before><commit_msg>Add failing test for index route
Currently the index route gloms every other route.<commit_after>from django.test import TestCase
class TestIndex(TestCase):
"""Verify the index page is served properly"""
def test_root(self):
# Fetch page from '/'
reponse = self.client.get('/')
# Should respond OK
self.assertEqual(reponse.status_code, 200)
# Should be rendered from a particular template
self.assertEqual(reponse.templates[0].name, 'features/index.html')
def test_only_root(self):
"""Should only be served from /"""
# Fetch from path that shouldn't exist
response = self.client.get('/extrastuff')
# Should 404 out
self.assertEqual(response.status_code, 404)
# Fetch from path that may exist
response = self.client.get('/api/')
# Make sure it's not the index if it does exist
if response.status_code == 200:
if response.templates:
self.assertNotEqual(response.templates[0].name, 'features/index.html')
|
|
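In the row above, `prompt` repeats `message`, `response` and `response_tagged` repeat `new_contents`, and the tagged and `text` fields wrap the same strings in `<commit_before>`, `<commit_msg>`, and `<commit_after>` markers. The sketch below reconstructs that apparent recipe; it is inferred from the sample rows (all of which have an empty `old_contents`), not taken from any dataset documentation, so the real pipeline may differ.

```python
# Sketch of how the derived fields in each row appear to be assembled.
# Inferred from the sample rows; the actual preprocessing may differ.
def derive_fields(old_contents: str, message: str, new_contents: str) -> dict:
    prompt_tagged = (
        "<commit_before>" + old_contents
        + "<commit_msg>" + message
        + "<commit_after>"
    )
    return {
        "prompt": message,
        "response": new_contents,
        "prompt_tagged": prompt_tagged,
        "response_tagged": new_contents,
        "text": message + new_contents,
        "text_tagged": prompt_tagged + new_contents,
    }
```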
3f7b54496826f496863de545a601c23c2c06427a
|
shipyard2/shipyard2/rules/javascripts.py
|
shipyard2/shipyard2/rules/javascripts.py
|
"""Helpers for writing rules for first-party JavaScript packages."""
__all__ = [
'define_package',
'find_package',
]
import dataclasses
import logging
import foreman
from g1 import scripts
from g1.bases.assertions import ASSERT
from shipyard2 import rules
LOG = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class PackageRules:
build: foreman.Rule
def define_package(
*,
name_prefix='',
deps=(),
sub_directory_path=None,
):
"""Define a first-party package.
This defines:
* Rule: [name_prefix/]build.
"""
name_prefix = rules.canonicalize_name_prefix(name_prefix)
rule_build = name_prefix + 'build'
@foreman.rule(rule_build)
@foreman.rule.depend('//bases:build')
@foreman.rule.depend('//third-party/nodejs:build')
def build(parameters):
src_path = find_package(
parameters,
foreman.get_relpath(),
sub_directory_path,
)
LOG.info('build first-party package: %s', src_path)
with scripts.using_cwd(src_path):
scripts.run(['npm', 'install'])
scripts.run(['npm', 'run', 'build'])
for dep in deps:
build = build.depend(dep)
return PackageRules(build=build)
def find_package(parameters, relpath, sub_directory_path=None):
"""Find path to a first-party package."""
root_paths = parameters['//bases:roots']
for root_path in root_paths:
path = root_path / relpath
if sub_directory_path:
path /= sub_directory_path
if (path / 'package.json').is_file():
return path
return ASSERT.unreachable(
'cannot find package {} under: {}', relpath, root_paths
)
|
Add first-party JavaScript build rule library
|
Add first-party JavaScript build rule library
|
Python
|
mit
|
clchiou/garage,clchiou/garage,clchiou/garage,clchiou/garage
|
Add first-party JavaScript build rule library
|
"""Helpers for writing rules for first-party JavaScript packages."""
__all__ = [
'define_package',
'find_package',
]
import dataclasses
import logging
import foreman
from g1 import scripts
from g1.bases.assertions import ASSERT
from shipyard2 import rules
LOG = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class PackageRules:
build: foreman.Rule
def define_package(
*,
name_prefix='',
deps=(),
sub_directory_path=None,
):
"""Define a first-party package.
This defines:
* Rule: [name_prefix/]build.
"""
name_prefix = rules.canonicalize_name_prefix(name_prefix)
rule_build = name_prefix + 'build'
@foreman.rule(rule_build)
@foreman.rule.depend('//bases:build')
@foreman.rule.depend('//third-party/nodejs:build')
def build(parameters):
src_path = find_package(
parameters,
foreman.get_relpath(),
sub_directory_path,
)
LOG.info('build first-party package: %s', src_path)
with scripts.using_cwd(src_path):
scripts.run(['npm', 'install'])
scripts.run(['npm', 'run', 'build'])
for dep in deps:
build = build.depend(dep)
return PackageRules(build=build)
def find_package(parameters, relpath, sub_directory_path=None):
"""Find path to a first-party package."""
root_paths = parameters['//bases:roots']
for root_path in root_paths:
path = root_path / relpath
if sub_directory_path:
path /= sub_directory_path
if (path / 'package.json').is_file():
return path
return ASSERT.unreachable(
'cannot find package {} under: {}', relpath, root_paths
)
|
<commit_before><commit_msg>Add first-party JavaScript build rule library<commit_after>
|
"""Helpers for writing rules for first-party JavaScript packages."""
__all__ = [
'define_package',
'find_package',
]
import dataclasses
import logging
import foreman
from g1 import scripts
from g1.bases.assertions import ASSERT
from shipyard2 import rules
LOG = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class PackageRules:
build: foreman.Rule
def define_package(
*,
name_prefix='',
deps=(),
sub_directory_path=None,
):
"""Define a first-party package.
This defines:
* Rule: [name_prefix/]build.
"""
name_prefix = rules.canonicalize_name_prefix(name_prefix)
rule_build = name_prefix + 'build'
@foreman.rule(rule_build)
@foreman.rule.depend('//bases:build')
@foreman.rule.depend('//third-party/nodejs:build')
def build(parameters):
src_path = find_package(
parameters,
foreman.get_relpath(),
sub_directory_path,
)
LOG.info('build first-party package: %s', src_path)
with scripts.using_cwd(src_path):
scripts.run(['npm', 'install'])
scripts.run(['npm', 'run', 'build'])
for dep in deps:
build = build.depend(dep)
return PackageRules(build=build)
def find_package(parameters, relpath, sub_directory_path=None):
"""Find path to a first-party package."""
root_paths = parameters['//bases:roots']
for root_path in root_paths:
path = root_path / relpath
if sub_directory_path:
path /= sub_directory_path
if (path / 'package.json').is_file():
return path
return ASSERT.unreachable(
'cannot find package {} under: {}', relpath, root_paths
)
|
Add first-party JavaScript build rule library"""Helpers for writing rules for first-party JavaScript packages."""
__all__ = [
'define_package',
'find_package',
]
import dataclasses
import logging
import foreman
from g1 import scripts
from g1.bases.assertions import ASSERT
from shipyard2 import rules
LOG = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class PackageRules:
build: foreman.Rule
def define_package(
*,
name_prefix='',
deps=(),
sub_directory_path=None,
):
"""Define a first-party package.
This defines:
* Rule: [name_prefix/]build.
"""
name_prefix = rules.canonicalize_name_prefix(name_prefix)
rule_build = name_prefix + 'build'
@foreman.rule(rule_build)
@foreman.rule.depend('//bases:build')
@foreman.rule.depend('//third-party/nodejs:build')
def build(parameters):
src_path = find_package(
parameters,
foreman.get_relpath(),
sub_directory_path,
)
LOG.info('build first-party package: %s', src_path)
with scripts.using_cwd(src_path):
scripts.run(['npm', 'install'])
scripts.run(['npm', 'run', 'build'])
for dep in deps:
build = build.depend(dep)
return PackageRules(build=build)
def find_package(parameters, relpath, sub_directory_path=None):
"""Find path to a first-party package."""
root_paths = parameters['//bases:roots']
for root_path in root_paths:
path = root_path / relpath
if sub_directory_path:
path /= sub_directory_path
if (path / 'package.json').is_file():
return path
return ASSERT.unreachable(
'cannot find package {} under: {}', relpath, root_paths
)
|
<commit_before><commit_msg>Add first-party JavaScript build rule library<commit_after>"""Helpers for writing rules for first-party JavaScript packages."""
__all__ = [
'define_package',
'find_package',
]
import dataclasses
import logging
import foreman
from g1 import scripts
from g1.bases.assertions import ASSERT
from shipyard2 import rules
LOG = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class PackageRules:
build: foreman.Rule
def define_package(
*,
name_prefix='',
deps=(),
sub_directory_path=None,
):
"""Define a first-party package.
This defines:
* Rule: [name_prefix/]build.
"""
name_prefix = rules.canonicalize_name_prefix(name_prefix)
rule_build = name_prefix + 'build'
@foreman.rule(rule_build)
@foreman.rule.depend('//bases:build')
@foreman.rule.depend('//third-party/nodejs:build')
def build(parameters):
src_path = find_package(
parameters,
foreman.get_relpath(),
sub_directory_path,
)
LOG.info('build first-party package: %s', src_path)
with scripts.using_cwd(src_path):
scripts.run(['npm', 'install'])
scripts.run(['npm', 'run', 'build'])
for dep in deps:
build = build.depend(dep)
return PackageRules(build=build)
def find_package(parameters, relpath, sub_directory_path=None):
"""Find path to a first-party package."""
root_paths = parameters['//bases:roots']
for root_path in root_paths:
path = root_path / relpath
if sub_directory_path:
path /= sub_directory_path
if (path / 'package.json').is_file():
return path
return ASSERT.unreachable(
'cannot find package {} under: {}', relpath, root_paths
)
|
|
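Since `lang` holds a single value and `license` spans 13 values, slicing the rows by license or repository is straightforward; a hedged sketch follows, again assuming the Hugging Face `datasets` API and the `ds` object from the placeholder-id example above.

```python
# Sketch: selecting rows by license or repository from the dataset loaded earlier.
# Column names follow the schema table; license strings match the sample rows.
permissive = ds.filter(
    lambda row: row["license"] in {"mit", "bsd-3-clause", "apache-2.0"}
)
print(len(permissive), "rows with permissive licenses")

# `repos` is a comma-separated list of repositories containing the file.
garage_rows = ds.filter(lambda row: "clchiou/garage" in row["repos"].split(","))
```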
6a74915c3f197ef197a34514c7ff313ac0a68d2f
|
corehq/apps/fixtures/migrations/0002_rm_blobdb_domain_fixtures.py
|
corehq/apps/fixtures/migrations/0002_rm_blobdb_domain_fixtures.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-08 10:23
from __future__ import unicode_literals
from django.db import migrations
from corehq.blobs import get_blob_db
from corehq.sql_db.operations import HqRunPython
FIXTURE_BUCKET = 'domain-fixtures'
def rm_blobdb_domain_fixtures(apps, schema_editor):
get_blob_db().delete(bucket=FIXTURE_BUCKET)
def noop_reverse_migration(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('fixtures', '0001_initial'),
]
operations = [
HqRunPython(rm_blobdb_domain_fixtures, noop_reverse_migration),
]
|
Add migration to delete existing cache values
|
Add migration to delete existing cache values
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add migration to delete existing cache values
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-08 10:23
from __future__ import unicode_literals
from django.db import migrations
from corehq.blobs import get_blob_db
from corehq.sql_db.operations import HqRunPython
FIXTURE_BUCKET = 'domain-fixtures'
def rm_blobdb_domain_fixtures(apps, schema_editor):
get_blob_db().delete(bucket=FIXTURE_BUCKET)
def noop_reverse_migration(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('fixtures', '0001_initial'),
]
operations = [
HqRunPython(rm_blobdb_domain_fixtures, noop_reverse_migration),
]
|
<commit_before><commit_msg>Add migration to delete existing cache values<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-08 10:23
from __future__ import unicode_literals
from django.db import migrations
from corehq.blobs import get_blob_db
from corehq.sql_db.operations import HqRunPython
FIXTURE_BUCKET = 'domain-fixtures'
def rm_blobdb_domain_fixtures(apps, schema_editor):
get_blob_db().delete(bucket=FIXTURE_BUCKET)
def noop_reverse_migration(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('fixtures', '0001_initial'),
]
operations = [
HqRunPython(rm_blobdb_domain_fixtures, noop_reverse_migration),
]
|
Add migration to delete existing cache values# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-08 10:23
from __future__ import unicode_literals
from django.db import migrations
from corehq.blobs import get_blob_db
from corehq.sql_db.operations import HqRunPython
FIXTURE_BUCKET = 'domain-fixtures'
def rm_blobdb_domain_fixtures(apps, schema_editor):
get_blob_db().delete(bucket=FIXTURE_BUCKET)
def noop_reverse_migration(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('fixtures', '0001_initial'),
]
operations = [
HqRunPython(rm_blobdb_domain_fixtures, noop_reverse_migration),
]
|
<commit_before><commit_msg>Add migration to delete existing cache values<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-08 10:23
from __future__ import unicode_literals
from django.db import migrations
from corehq.blobs import get_blob_db
from corehq.sql_db.operations import HqRunPython
FIXTURE_BUCKET = 'domain-fixtures'
def rm_blobdb_domain_fixtures(apps, schema_editor):
get_blob_db().delete(bucket=FIXTURE_BUCKET)
def noop_reverse_migration(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('fixtures', '0001_initial'),
]
operations = [
HqRunPython(rm_blobdb_domain_fixtures, noop_reverse_migration),
]
|
|
e184b806c6170aad2bdee87c051ea6400e1d954e
|
tests/parser_test.py
|
tests/parser_test.py
|
import unittest
from clippings.parser import Document
class DocumentTest(unittest.TestCase):
def test_create_document(self):
title = 'Haunted'
authors = ['Chuck Palahniuk']
document = Document(title, authors)
self.assertEqual(title, document.title)
self.assertEqual(authors, document.authors)
def test_parse_document_with_single_author(self):
document_line = '1984 (George Orwell)'
document = Document.parse(document_line)
expected_authors = ['George Orwell']
self.assertEqual(expected_authors, document.authors)
expected_title = '1984'
self.assertEqual(expected_title, document.title)
def test_parse_document_with_multiple_authors(self):
document_line = 'Java Concurrency in Practice (Joshua Bloch;Brian Goetz)'
document = Document.parse(document_line)
expected_authors = [
'Joshua Bloch',
'Brian Goetz',
]
self.assertEqual(expected_authors, document.authors)
expected_title = 'Java Concurrency in Practice'
self.assertEqual(expected_title, document.title)
def test_document_to_string(self):
title = 'Also sprach Zarathustra'
authors = ['Friedrich Nietzsche']
document = Document(title, authors)
expected_string = 'Also sprach Zarathustra (Friedrich Nietzsche)'
self.assertEqual(expected_string, str(document))
def test_document_to_dict(self):
title = 'À la recherche du temps perdu'
authors = ['Marcel Proust']
document = Document(title, authors)
expected_dict = {
'title': title,
'authors': authors
}
self.assertEqual(expected_dict, document.to_dict())
|
Add unit tests for the Document class
|
Add unit tests for the Document class
|
Python
|
mit
|
samueldg/clippings
|
Add unit tests for the Document class
|
import unittest
from clippings.parser import Document
class DocumentTest(unittest.TestCase):
def test_create_document(self):
title = 'Haunted'
authors = ['Chuck Palahniuk']
document = Document(title, authors)
self.assertEqual(title, document.title)
self.assertEqual(authors, document.authors)
def test_parse_document_with_single_author(self):
document_line = '1984 (George Orwell)'
document = Document.parse(document_line)
expected_authors = ['George Orwell']
self.assertEqual(expected_authors, document.authors)
expected_title = '1984'
self.assertEqual(expected_title, document.title)
def test_parse_document_with_multiple_authors(self):
document_line = 'Java Concurrency in Practice (Joshua Bloch;Brian Goetz)'
document = Document.parse(document_line)
expected_authors = [
'Joshua Bloch',
'Brian Goetz',
]
self.assertEqual(expected_authors, document.authors)
expected_title = 'Java Concurrency in Practice'
self.assertEqual(expected_title, document.title)
def test_document_to_string(self):
title = 'Also sprach Zarathustra'
authors = ['Friedrich Nietzsche']
document = Document(title, authors)
expected_string = 'Also sprach Zarathustra (Friedrich Nietzsche)'
self.assertEqual(expected_string, str(document))
def test_document_to_dict(self):
title = 'À la recherche du temps perdu'
authors = ['Marcel Proust']
document = Document(title, authors)
expected_dict = {
'title': title,
'authors': authors
}
self.assertEqual(expected_dict, document.to_dict())
|
<commit_before><commit_msg>Add unit tests for the Document class<commit_after>
|
import unittest
from clippings.parser import Document
class DocumentTest(unittest.TestCase):
def test_create_document(self):
title = 'Haunted'
authors = ['Chuck Palahniuk']
document = Document(title, authors)
self.assertEqual(title, document.title)
self.assertEqual(authors, document.authors)
def test_parse_document_with_single_author(self):
document_line = '1984 (George Orwell)'
document = Document.parse(document_line)
expected_authors = ['George Orwell']
self.assertEqual(expected_authors, document.authors)
expected_title = '1984'
self.assertEqual(expected_title, document.title)
def test_parse_document_with_multiple_authors(self):
document_line = 'Java Concurrency in Practice (Joshua Bloch;Brian Goetz)'
document = Document.parse(document_line)
expected_authors = [
'Joshua Bloch',
'Brian Goetz',
]
self.assertEqual(expected_authors, document.authors)
expected_title = 'Java Concurrency in Practice'
self.assertEqual(expected_title, document.title)
def test_document_to_string(self):
title = 'Also sprach Zarathustra'
authors = ['Friedrich Nietzsche']
document = Document(title, authors)
expected_string = 'Also sprach Zarathustra (Friedrich Nietzsche)'
self.assertEqual(expected_string, str(document))
def test_document_to_dict(self):
title = 'À la recherche du temps perdu'
authors = ['Marcel Proust']
document = Document(title, authors)
expected_dict = {
'title': title,
'authors': authors
}
self.assertEqual(expected_dict, document.to_dict())
|
Add unit tests for the Document classimport unittest
from clippings.parser import Document
class DocumentTest(unittest.TestCase):
def test_create_document(self):
title = 'Haunted'
authors = ['Chuck Palahniuk']
document = Document(title, authors)
self.assertEqual(title, document.title)
self.assertEqual(authors, document.authors)
def test_parse_document_with_single_author(self):
document_line = '1984 (George Orwell)'
document = Document.parse(document_line)
expected_authors = ['George Orwell']
self.assertEqual(expected_authors, document.authors)
expected_title = '1984'
self.assertEqual(expected_title, document.title)
def test_parse_document_with_multiple_authors(self):
document_line = 'Java Concurrency in Practice (Joshua Bloch;Brian Goetz)'
document = Document.parse(document_line)
expected_authors = [
'Joshua Bloch',
'Brian Goetz',
]
self.assertEqual(expected_authors, document.authors)
expected_title = 'Java Concurrency in Practice'
self.assertEqual(expected_title, document.title)
def test_document_to_string(self):
title = 'Also sprach Zarathustra'
authors = ['Friedrich Nietzsche']
document = Document(title, authors)
expected_string = 'Also sprach Zarathustra (Friedrich Nietzsche)'
self.assertEqual(expected_string, str(document))
def test_document_to_dict(self):
title = 'À la recherche du temps perdu'
authors = ['Marcel Proust']
document = Document(title, authors)
expected_dict = {
'title': title,
'authors': authors
}
self.assertEqual(expected_dict, document.to_dict())
|
<commit_before><commit_msg>Add unit tests for the Document class<commit_after>import unittest
from clippings.parser import Document
class DocumentTest(unittest.TestCase):
def test_create_document(self):
title = 'Haunted'
authors = ['Chuck Palahniuk']
document = Document(title, authors)
self.assertEqual(title, document.title)
self.assertEqual(authors, document.authors)
def test_parse_document_with_single_author(self):
document_line = '1984 (George Orwell)'
document = Document.parse(document_line)
expected_authors = ['George Orwell']
self.assertEqual(expected_authors, document.authors)
expected_title = '1984'
self.assertEqual(expected_title, document.title)
def test_parse_document_with_multiple_authors(self):
document_line = 'Java Concurrency in Practice (Joshua Bloch;Brian Goetz)'
document = Document.parse(document_line)
expected_authors = [
'Joshua Bloch',
'Brian Goetz',
]
self.assertEqual(expected_authors, document.authors)
expected_title = 'Java Concurrency in Practice'
self.assertEqual(expected_title, document.title)
def test_document_to_string(self):
title = 'Also sprach Zarathustra'
authors = ['Friedrich Nietzsche']
document = Document(title, authors)
expected_string = 'Also sprach Zarathustra (Friedrich Nietzsche)'
self.assertEqual(expected_string, str(document))
def test_document_to_dict(self):
title = 'À la recherche du temps perdu'
authors = ['Marcel Proust']
document = Document(title, authors)
expected_dict = {
'title': title,
'authors': authors
}
self.assertEqual(expected_dict, document.to_dict())
|
|
e9cfb095ac4261c8bf959d1c9b904256c267178f
|
openfisca_web_api/tests/test_variables.py
|
openfisca_web_api/tests/test_variables.py
|
# -*- coding: utf-8 -*-
import json
from nose.tools import assert_equal, assert_greater, assert_in, assert_is_instance
from webob import Request
from . import common
def setup_module(module):
common.get_or_load_app()
def test_basic_call():
req = Request.blank('/api/1/variables', method = 'GET')
res = req.get_response(common.app)
assert_equal(res.status_code, 200, res.body)
res_json = json.loads(res.body)
assert_is_instance(res_json, dict)
assert_in('country_package_name', res_json)
assert_in('country_package_version', res_json)
assert_in('variables', res_json)
assert_is_instance(res_json['variables'], list)
source_file_path = res_json['variables'][0]['source_file_path']
assert source_file_path.startswith('model'), source_file_path
|
Add basic unit test about /variables endpoint
|
Add basic unit test about /variables endpoint
|
Python
|
agpl-3.0
|
openfisca/openfisca-web-api,openfisca/openfisca-web-api
|
Add basic unit test about /variables endpoint
|
# -*- coding: utf-8 -*-
import json
from nose.tools import assert_equal, assert_greater, assert_in, assert_is_instance
from webob import Request
from . import common
def setup_module(module):
common.get_or_load_app()
def test_basic_call():
req = Request.blank('/api/1/variables', method = 'GET')
res = req.get_response(common.app)
assert_equal(res.status_code, 200, res.body)
res_json = json.loads(res.body)
assert_is_instance(res_json, dict)
assert_in('country_package_name', res_json)
assert_in('country_package_version', res_json)
assert_in('variables', res_json)
assert_is_instance(res_json['variables'], list)
source_file_path = res_json['variables'][0]['source_file_path']
assert source_file_path.startswith('model'), source_file_path
|
<commit_before><commit_msg>Add basic unit test about /variables endpoint<commit_after>
|
# -*- coding: utf-8 -*-
import json
from nose.tools import assert_equal, assert_greater, assert_in, assert_is_instance
from webob import Request
from . import common
def setup_module(module):
common.get_or_load_app()
def test_basic_call():
req = Request.blank('/api/1/variables', method = 'GET')
res = req.get_response(common.app)
assert_equal(res.status_code, 200, res.body)
res_json = json.loads(res.body)
assert_is_instance(res_json, dict)
assert_in('country_package_name', res_json)
assert_in('country_package_version', res_json)
assert_in('variables', res_json)
assert_is_instance(res_json['variables'], list)
source_file_path = res_json['variables'][0]['source_file_path']
assert source_file_path.startswith('model'), source_file_path
|
Add basic unit test about /variables endpoint# -*- coding: utf-8 -*-
import json
from nose.tools import assert_equal, assert_greater, assert_in, assert_is_instance
from webob import Request
from . import common
def setup_module(module):
common.get_or_load_app()
def test_basic_call():
req = Request.blank('/api/1/variables', method = 'GET')
res = req.get_response(common.app)
assert_equal(res.status_code, 200, res.body)
res_json = json.loads(res.body)
assert_is_instance(res_json, dict)
assert_in('country_package_name', res_json)
assert_in('country_package_version', res_json)
assert_in('variables', res_json)
assert_is_instance(res_json['variables'], list)
source_file_path = res_json['variables'][0]['source_file_path']
assert source_file_path.startswith('model'), source_file_path
|
<commit_before><commit_msg>Add basic unit test about /variables endpoint<commit_after># -*- coding: utf-8 -*-
import json
from nose.tools import assert_equal, assert_greater, assert_in, assert_is_instance
from webob import Request
from . import common
def setup_module(module):
common.get_or_load_app()
def test_basic_call():
req = Request.blank('/api/1/variables', method = 'GET')
res = req.get_response(common.app)
assert_equal(res.status_code, 200, res.body)
res_json = json.loads(res.body)
assert_is_instance(res_json, dict)
assert_in('country_package_name', res_json)
assert_in('country_package_version', res_json)
assert_in('variables', res_json)
assert_is_instance(res_json['variables'], list)
source_file_path = res_json['variables'][0]['source_file_path']
assert source_file_path.startswith('model'), source_file_path
|
|
404c9b70cf9b6c27e0fb16be1556d01b5077a4f4
|
tests/test_regressions.py
|
tests/test_regressions.py
|
"""
"""
import time
import logging
import unittest
from flask import Flask
import mock
from flask.ext.limiter.extension import C, Limiter
class RegressionTests(unittest.TestCase):
def build_app(self, config={}, **limiter_args):
app = Flask(__name__)
for k,v in config.items():
app.config.setdefault(k,v)
limiter = Limiter(app, **limiter_args)
mock_handler = mock.Mock()
mock_handler.level = logging.INFO
limiter.logger.addHandler(mock_handler)
return app, limiter
def test_redis_request_slower_than_fixed_window(self):
app, limiter = self.build_app({
C.GLOBAL_LIMITS : "5 per second",
C.STORAGE_URL: "redis://localhost:6379",
C.STRATEGY: "fixed-window"
})
@app.route("/t1")
def t1():
time.sleep(1.1)
return "t1"
with app.test_client() as cli:
self.assertEqual(200, cli.get("/t1").status_code)
def test_redis_request_slower_than_moving_window(self):
app, limiter = self.build_app({
C.GLOBAL_LIMITS : "5 per second",
C.STORAGE_URL: "redis://localhost:6379",
C.STRATEGY: "moving-window"
})
@app.route("/t1")
def t1():
time.sleep(1.1)
return "t1"
with app.test_client() as cli:
self.assertEqual(200, cli.get("/t1").status_code)
|
Add test cases for 500 error with slow responses
|
Add test cases for 500 error with slow responses
when using redis as the rate limit storage and the response
is slower than the rate limit window, 500 errors occur as the
key is not available post request for constructing the rate limit
headers.
|
Python
|
mit
|
alisaifee/flask-limiter,joshfriend/flask-limiter,alisaifee/limits,alisaifee/limits,joshfriend/flask-limiter,alisaifee/flask-limiter
|
Add test cases for 500 error with slow responses
when using redis as the rate limit storage and the response
is slower than the rate limit window, 500 errors occur as the
key is not available post request for constructing the rate limit
headers.
|
"""
"""
import time
import logging
import unittest
from flask import Flask
import mock
from flask.ext.limiter.extension import C, Limiter
class RegressionTests(unittest.TestCase):
def build_app(self, config={}, **limiter_args):
app = Flask(__name__)
for k,v in config.items():
app.config.setdefault(k,v)
limiter = Limiter(app, **limiter_args)
mock_handler = mock.Mock()
mock_handler.level = logging.INFO
limiter.logger.addHandler(mock_handler)
return app, limiter
def test_redis_request_slower_than_fixed_window(self):
app, limiter = self.build_app({
C.GLOBAL_LIMITS : "5 per second",
C.STORAGE_URL: "redis://localhost:6379",
C.STRATEGY: "fixed-window"
})
@app.route("/t1")
def t1():
time.sleep(1.1)
return "t1"
with app.test_client() as cli:
self.assertEqual(200, cli.get("/t1").status_code)
def test_redis_request_slower_than_moving_window(self):
app, limiter = self.build_app({
C.GLOBAL_LIMITS : "5 per second",
C.STORAGE_URL: "redis://localhost:6379",
C.STRATEGY: "moving-window"
})
@app.route("/t1")
def t1():
time.sleep(1.1)
return "t1"
with app.test_client() as cli:
self.assertEqual(200, cli.get("/t1").status_code)
|
<commit_before><commit_msg>Add test cases for 500 error with slow responses
when using redis as the rate limit storage and the response
is slower than the rate limit window, 500 errors occur as the
key is not available post request for constructing the rate limit
headers.<commit_after>
|
"""
"""
import time
import logging
import unittest
from flask import Flask
import mock
from flask.ext.limiter.extension import C, Limiter
class RegressionTests(unittest.TestCase):
def build_app(self, config={}, **limiter_args):
app = Flask(__name__)
for k,v in config.items():
app.config.setdefault(k,v)
limiter = Limiter(app, **limiter_args)
mock_handler = mock.Mock()
mock_handler.level = logging.INFO
limiter.logger.addHandler(mock_handler)
return app, limiter
def test_redis_request_slower_than_fixed_window(self):
app, limiter = self.build_app({
C.GLOBAL_LIMITS : "5 per second",
C.STORAGE_URL: "redis://localhost:6379",
C.STRATEGY: "fixed-window"
})
@app.route("/t1")
def t1():
time.sleep(1.1)
return "t1"
with app.test_client() as cli:
self.assertEqual(200, cli.get("/t1").status_code)
def test_redis_request_slower_than_moving_window(self):
app, limiter = self.build_app({
C.GLOBAL_LIMITS : "5 per second",
C.STORAGE_URL: "redis://localhost:6379",
C.STRATEGY: "moving-window"
})
@app.route("/t1")
def t1():
time.sleep(1.1)
return "t1"
with app.test_client() as cli:
self.assertEqual(200, cli.get("/t1").status_code)
|
Add test cases for 500 error with slow responses
when using redis as the rate limit storage and the response
is slower than the rate limit window, 500 errors occur as the
key is not available post request for constructing the rate limit
headers."""
"""
import time
import logging
import unittest
from flask import Flask
import mock
from flask.ext.limiter.extension import C, Limiter
class RegressionTests(unittest.TestCase):
def build_app(self, config={}, **limiter_args):
app = Flask(__name__)
for k,v in config.items():
app.config.setdefault(k,v)
limiter = Limiter(app, **limiter_args)
mock_handler = mock.Mock()
mock_handler.level = logging.INFO
limiter.logger.addHandler(mock_handler)
return app, limiter
def test_redis_request_slower_than_fixed_window(self):
app, limiter = self.build_app({
C.GLOBAL_LIMITS : "5 per second",
C.STORAGE_URL: "redis://localhost:6379",
C.STRATEGY: "fixed-window"
})
@app.route("/t1")
def t1():
time.sleep(1.1)
return "t1"
with app.test_client() as cli:
self.assertEqual(200, cli.get("/t1").status_code)
def test_redis_request_slower_than_moving_window(self):
app, limiter = self.build_app({
C.GLOBAL_LIMITS : "5 per second",
C.STORAGE_URL: "redis://localhost:6379",
C.STRATEGY: "moving-window"
})
@app.route("/t1")
def t1():
time.sleep(1.1)
return "t1"
with app.test_client() as cli:
self.assertEqual(200, cli.get("/t1").status_code)
|
<commit_before><commit_msg>Add test cases for 500 error with slow responses
when using redis as the rate limit storage and the response
is slower than the rate limit window, 500 errors occur as the
key is not available post request for constructing the rate limit
headers.<commit_after>"""
"""
import time
import logging
import unittest
from flask import Flask
import mock
from flask.ext.limiter.extension import C, Limiter
class RegressionTests(unittest.TestCase):
def build_app(self, config={}, **limiter_args):
app = Flask(__name__)
for k,v in config.items():
app.config.setdefault(k,v)
limiter = Limiter(app, **limiter_args)
mock_handler = mock.Mock()
mock_handler.level = logging.INFO
limiter.logger.addHandler(mock_handler)
return app, limiter
def test_redis_request_slower_than_fixed_window(self):
app, limiter = self.build_app({
C.GLOBAL_LIMITS : "5 per second",
C.STORAGE_URL: "redis://localhost:6379",
C.STRATEGY: "fixed-window"
})
@app.route("/t1")
def t1():
time.sleep(1.1)
return "t1"
with app.test_client() as cli:
self.assertEqual(200, cli.get("/t1").status_code)
def test_redis_request_slower_than_moving_window(self):
app, limiter = self.build_app({
C.GLOBAL_LIMITS : "5 per second",
C.STORAGE_URL: "redis://localhost:6379",
C.STRATEGY: "moving-window"
})
@app.route("/t1")
def t1():
time.sleep(1.1)
return "t1"
with app.test_client() as cli:
self.assertEqual(200, cli.get("/t1").status_code)
|
|
27f187d3cc5725b6ed912e15ecafb38a44cc4992
|
tests/unit/utils/test_win_service.py
|
tests/unit/utils/test_win_service.py
|
# Import Python Libs
import os
# Import Salt Libs
import salt.utils.platform
# Import Salt Testing Libs
from tests.support.mock import patch, MagicMock
from tests.support.unit import TestCase, skipIf
try:
import salt.utils.win_service as win_service
from salt.exceptions import CommandExecutionError
except Exception as exc: # pylint: disable=broad-except
win_service = exc
# Import 3rd Party Libs
try:
import pywintypes
import win32service
WINAPI = True
except ImportError:
WINAPI = False
class WinServiceImportTestCase(TestCase):
def test_import(self):
"""
Simply importing should not raise an error, especially on Linux
"""
if isinstance(win_service, Exception):
raise Exception(
"Importing win_system caused traceback: {0}".format(win_service)
)
@skipIf(not salt.utils.platform.is_windows(), "Only test on Windows systems")
@skipIf(not WINAPI, "Missing PyWin32 libraries")
class WinServiceTestCase(TestCase):
"""
Test cases for salt.utils.win_service
"""
def test_info(self):
"""
Test service.info
"""
# Get info about the spooler service
info = win_service.info("spooler")
# Make sure it returns these fields
field_names = [
"BinaryPath",
"ControlsAccepted",
"Dependencies",
"Description",
"DisplayName",
"ErrorControl",
"LoadOrderGroup",
"ServiceAccount",
"ServiceType",
"StartType",
"StartTypeDelayed",
"Status",
"Status_CheckPoint",
"Status_ExitCode",
"Status_ServiceCode",
"Status_WaitHint",
"TagID",
"sid"
]
for field_name in field_names:
self.assertIn(field_name, info)
# Make sure it returns a valid Display Name
self.assertEqual(info["DisplayName"], "Print Spooler")
|
Add unit tests for new service util
|
Add unit tests for new service util
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add unit tests for new service util
|
# Import Python Libs
import os
# Import Salt Libs
import salt.utils.platform
# Import Salt Testing Libs
from tests.support.mock import patch, MagicMock
from tests.support.unit import TestCase, skipIf
try:
import salt.utils.win_service as win_service
from salt.exceptions import CommandExecutionError
except Exception as exc: # pylint: disable=broad-except
win_service = exc
# Import 3rd Party Libs
try:
import pywintypes
import win32service
WINAPI = True
except ImportError:
WINAPI = False
class WinServiceImportTestCase(TestCase):
def test_import(self):
"""
Simply importing should not raise an error, especially on Linux
"""
if isinstance(win_service, Exception):
raise Exception(
"Importing win_system caused traceback: {0}".format(win_service)
)
@skipIf(not salt.utils.platform.is_windows(), "Only test on Windows systems")
@skipIf(not WINAPI, "Missing PyWin32 libraries")
class WinServiceTestCase(TestCase):
"""
Test cases for salt.utils.win_service
"""
def test_info(self):
"""
Test service.info
"""
# Get info about the spooler service
info = win_service.info("spooler")
# Make sure it returns these fields
field_names = [
"BinaryPath",
"ControlsAccepted",
"Dependencies",
"Description",
"DisplayName",
"ErrorControl",
"LoadOrderGroup",
"ServiceAccount",
"ServiceType",
"StartType",
"StartTypeDelayed",
"Status",
"Status_CheckPoint",
"Status_ExitCode",
"Status_ServiceCode",
"Status_WaitHint",
"TagID",
"sid"
]
for field_name in field_names:
self.assertIn(field_name, info)
# Make sure it returns a valid Display Name
self.assertEqual(info["DisplayName"], "Print Spooler")
|
<commit_before><commit_msg>Add unit tests for new service util<commit_after>
|
# Import Python Libs
import os
# Import Salt Libs
import salt.utils.platform
# Import Salt Testing Libs
from tests.support.mock import patch, MagicMock
from tests.support.unit import TestCase, skipIf
try:
import salt.utils.win_service as win_service
from salt.exceptions import CommandExecutionError
except Exception as exc: # pylint: disable=broad-except
win_service = exc
# Import 3rd Party Libs
try:
import pywintypes
import win32service
WINAPI = True
except ImportError:
WINAPI = False
class WinServiceImportTestCase(TestCase):
def test_import(self):
"""
Simply importing should not raise an error, especially on Linux
"""
if isinstance(win_service, Exception):
raise Exception(
"Importing win_system caused traceback: {0}".format(win_service)
)
@skipIf(not salt.utils.platform.is_windows(), "Only test on Windows systems")
@skipIf(not WINAPI, "Missing PyWin32 libraries")
class WinServiceTestCase(TestCase):
"""
Test cases for salt.utils.win_service
"""
def test_info(self):
"""
Test service.info
"""
# Get info about the spooler service
info = win_service.info("spooler")
# Make sure it returns these fields
field_names = [
"BinaryPath",
"ControlsAccepted",
"Dependencies",
"Description",
"DisplayName",
"ErrorControl",
"LoadOrderGroup",
"ServiceAccount",
"ServiceType",
"StartType",
"StartTypeDelayed",
"Status",
"Status_CheckPoint",
"Status_ExitCode",
"Status_ServiceCode",
"Status_WaitHint",
"TagID",
"sid"
]
for field_name in field_names:
self.assertIn(field_name, info)
# Make sure it returns a valid Display Name
self.assertEqual(info["DisplayName"], "Print Spooler")
|
Add unit tests for new service util# Import Python Libs
import os
# Import Salt Libs
import salt.utils.platform
# Import Salt Testing Libs
from tests.support.mock import patch, MagicMock
from tests.support.unit import TestCase, skipIf
try:
import salt.utils.win_service as win_service
from salt.exceptions import CommandExecutionError
except Exception as exc: # pylint: disable=broad-except
win_service = exc
# Import 3rd Party Libs
try:
import pywintypes
import win32service
WINAPI = True
except ImportError:
WINAPI = False
class WinServiceImportTestCase(TestCase):
def test_import(self):
"""
Simply importing should not raise an error, especially on Linux
"""
if isinstance(win_service, Exception):
raise Exception(
"Importing win_system caused traceback: {0}".format(win_service)
)
@skipIf(not salt.utils.platform.is_windows(), "Only test on Windows systems")
@skipIf(not WINAPI, "Missing PyWin32 libraries")
class WinServiceTestCase(TestCase):
"""
Test cases for salt.utils.win_service
"""
def test_info(self):
"""
Test service.info
"""
# Get info about the spooler service
info = win_service.info("spooler")
# Make sure it returns these fields
field_names = [
"BinaryPath",
"ControlsAccepted",
"Dependencies",
"Description",
"DisplayName",
"ErrorControl",
"LoadOrderGroup",
"ServiceAccount",
"ServiceType",
"StartType",
"StartTypeDelayed",
"Status",
"Status_CheckPoint",
"Status_ExitCode",
"Status_ServiceCode",
"Status_WaitHint",
"TagID",
"sid"
]
for field_name in field_names:
self.assertIn(field_name, info)
# Make sure it returns a valid Display Name
self.assertEqual(info["DisplayName"], "Print Spooler")
|
<commit_before><commit_msg>Add unit tests for new service util<commit_after># Import Python Libs
import os
# Import Salt Libs
import salt.utils.platform
# Import Salt Testing Libs
from tests.support.mock import patch, MagicMock
from tests.support.unit import TestCase, skipIf
try:
import salt.utils.win_service as win_service
from salt.exceptions import CommandExecutionError
except Exception as exc: # pylint: disable=broad-except
win_service = exc
# Import 3rd Party Libs
try:
import pywintypes
import win32service
WINAPI = True
except ImportError:
WINAPI = False
class WinServiceImportTestCase(TestCase):
def test_import(self):
"""
Simply importing should not raise an error, especially on Linux
"""
if isinstance(win_service, Exception):
raise Exception(
"Importing win_system caused traceback: {0}".format(win_service)
)
@skipIf(not salt.utils.platform.is_windows(), "Only test on Windows systems")
@skipIf(not WINAPI, "Missing PyWin32 libraries")
class WinServiceTestCase(TestCase):
"""
Test cases for salt.utils.win_service
"""
def test_info(self):
"""
Test service.info
"""
# Get info about the spooler service
info = win_service.info("spooler")
# Make sure it returns these fields
field_names = [
"BinaryPath",
"ControlsAccepted",
"Dependencies",
"Description",
"DisplayName",
"ErrorControl",
"LoadOrderGroup",
"ServiceAccount",
"ServiceType",
"StartType",
"StartTypeDelayed",
"Status",
"Status_CheckPoint",
"Status_ExitCode",
"Status_ServiceCode",
"Status_WaitHint",
"TagID",
"sid"
]
for field_name in field_names:
self.assertIn(field_name, info)
# Make sure it returns a valid Display Name
self.assertEqual(info["DisplayName"], "Print Spooler")
|
|
71bbd57214cd8be6ac8583884eb1fc2e5b270eb8
|
ideascube/conf/idb_fra_emmaus.py
|
ideascube/conf/idb_fra_emmaus.py
|
# -*- coding: utf-8 -*-
"""Ideaxbox for Emmaus, France"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Emmaus"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['FR']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['fr', 'ar', 'fa']
},
{
'id': 'gutenberg',
'lang': 'fr',
},
{
'id': 'khanacademy',
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikisource',
'languages': ['fr']
},
{
'id': 'wikibooks',
'languages': ['fr']
},
{
'id': 'wikivoyage',
'languages': ['fr']
},
{
'id': 'wiktionary',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
Add conf file for Emmaus Ideasbox in France
|
Add conf file for Emmaus Ideasbox in France
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add conf file for Emmaus Ideasbox in France
|
# -*- coding: utf-8 -*-
"""Ideaxbox for Emmaus, France"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Emmaus"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['FR']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['fr', 'ar', 'fa']
},
{
'id': 'gutenberg',
'lang': 'fr',
},
{
'id': 'khanacademy',
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikisource',
'languages': ['fr']
},
{
'id': 'wikibooks',
'languages': ['fr']
},
{
'id': 'wikivoyage',
'languages': ['fr']
},
{
'id': 'wiktionary',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
<commit_before><commit_msg>Add conf file for Emmaus Ideasbox in France<commit_after>
|
# -*- coding: utf-8 -*-
"""Ideaxbox for Emmaus, France"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Emmaus"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['FR']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['fr', 'ar', 'fa']
},
{
'id': 'gutenberg',
'lang': 'fr',
},
{
'id': 'khanacademy',
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikisource',
'languages': ['fr']
},
{
'id': 'wikibooks',
'languages': ['fr']
},
{
'id': 'wikivoyage',
'languages': ['fr']
},
{
'id': 'wiktionary',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
Add conf file for Emmaus Ideasbox in France# -*- coding: utf-8 -*-
"""Ideaxbox for Emmaus, France"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Emmaus"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['FR']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['fr', 'ar', 'fa']
},
{
'id': 'gutenberg',
'lang': 'fr',
},
{
'id': 'khanacademy',
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikisource',
'languages': ['fr']
},
{
'id': 'wikibooks',
'languages': ['fr']
},
{
'id': 'wikivoyage',
'languages': ['fr']
},
{
'id': 'wiktionary',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
<commit_before><commit_msg>Add conf file for Emmaus Ideasbox in France<commit_after># -*- coding: utf-8 -*-
"""Ideaxbox for Emmaus, France"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Emmaus"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['FR']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'wikipedia',
'languages': ['fr', 'ar', 'fa']
},
{
'id': 'gutenberg',
'lang': 'fr',
},
{
'id': 'khanacademy',
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'wikisource',
'languages': ['fr']
},
{
'id': 'wikibooks',
'languages': ['fr']
},
{
'id': 'wikivoyage',
'languages': ['fr']
},
{
'id': 'wiktionary',
'languages': ['fr']
},
{
'id': 'wikiversity',
'languages': ['fr']
},
{
'id': 'ted',
'sessions': [
('tedxgeneva2014.fr', 'Geneva 2014'),
('tedxlausanne2012.fr', 'Lausanne 2012'),
('tedxlausanne2013.fr', 'Lausanne 2013'),
('tedxlausanne2014.fr', 'Lausanne 2014'),
]
},
]
|
|
f3eb1c8efbcd3695dba0037faa4f90328625f547
|
permcomb.py
|
permcomb.py
|
#!/usr/bin/python
import itertools
import sys
def combination(elements,items):
for combination in itertools.product(xrange(elements), repeat=items):
print ''.join(map(str, combination))
if len(sys.argv) == 3:
allSet = int(sys.argv[1])
setItems = int(sys.argv[2])
if allSet >= setItems:
combination(allSet,setItems)
else:
print "[-] Set Items Should be greater than the Elements."
print " Example : permcomb.py 10 4"
else:
print "[-] Please Supply Two Arguments."
print " Example : percomb.py 10 5"
|
Add script for creating numeric passwordlists using permutation & combination.
|
Add script for creating numeric passwordlists using permutation & combination.
|
Python
|
cc0-1.0
|
JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology
|
Add script for creating numeric passwordlists using permutation & combination.
|
#!/usr/bin/python
import itertools
import sys
def combination(elements,items):
for combination in itertools.product(xrange(elements), repeat=items):
print ''.join(map(str, combination))
if len(sys.argv) == 3:
allSet = int(sys.argv[1])
setItems = int(sys.argv[2])
if allSet >= setItems:
combination(allSet,setItems)
else:
print "[-] Set Items Should be greater than the Elements."
print " Example : permcomb.py 10 4"
else:
print "[-] Please Supply Two Arguments."
print " Example : percomb.py 10 5"
|
<commit_before><commit_msg>Add script for creating numeric passwordlists using permutation & combination.<commit_after>
|
#!/usr/bin/python
import itertools
import sys
def combination(elements,items):
for combination in itertools.product(xrange(elements), repeat=items):
print ''.join(map(str, combination))
if len(sys.argv) == 3:
allSet = int(sys.argv[1])
setItems = int(sys.argv[2])
if allSet >= setItems:
combination(allSet,setItems)
else:
print "[-] Set Items Should be greater than the Elements."
print " Example : permcomb.py 10 4"
else:
print "[-] Please Supply Two Arguments."
print " Example : percomb.py 10 5"
|
Add script for creating numeric passwordlists using permutation & combination.#!/usr/bin/python
import itertools
import sys
def combination(elements,items):
for combination in itertools.product(xrange(elements), repeat=items):
print ''.join(map(str, combination))
if len(sys.argv) == 3:
allSet = int(sys.argv[1])
setItems = int(sys.argv[2])
if allSet >= setItems:
combination(allSet,setItems)
else:
print "[-] Set Items Should be greater than the Elements."
print " Example : permcomb.py 10 4"
else:
print "[-] Please Supply Two Arguments."
print " Example : percomb.py 10 5"
|
<commit_before><commit_msg>Add script for creating numeric passwordlists using permutation & combination.<commit_after>#!/usr/bin/python
import itertools
import sys
def combination(elements,items):
for combination in itertools.product(xrange(elements), repeat=items):
print ''.join(map(str, combination))
if len(sys.argv) == 3:
allSet = int(sys.argv[1])
setItems = int(sys.argv[2])
if allSet >= setItems:
combination(allSet,setItems)
else:
print "[-] Set Items Should be greater than the Elements."
print " Example : permcomb.py 10 4"
else:
print "[-] Please Supply Two Arguments."
print " Example : percomb.py 10 5"
|
|
b01076381ebc91f20c527f1632c7b3f2aa82d39a
|
perftest.py
|
perftest.py
|
"""
Simple peformance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
Add a very simple performance testing tool.
|
Add a very simple performance testing tool.
|
Python
|
bsd-3-clause
|
ajmirsky/couchdb-python
|
Add a very simple performance testing tool.
|
"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a very simple performance testing tool.<commit_after>
|
"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
Add a very simple performance testing tool."""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a very simple performance testing tool.<commit_after>"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
|
fbf91352da4cf16be8462f57c71aa9f86f21746f
|
amaranth/data_analysis/class_balance.py
|
amaranth/data_analysis/class_balance.py
|
# Lint as: python3
"""This script checks the balance of classes in the FDC dataset.
Classes are split based on LOW_CALORIE_THRESHOLD and
HIGH_CALORIE_THRESHOLD in the amaranth module.
"""
import os
import pandas as pd
import amaranth
from amaranth.ml import lib
FDC_DATA_DIR = '../../data/fdc/'
def main():
# Read in calorie data
current_dir = os.path.dirname(__file__)
abs_fdc_data_dir = os.path.join(current_dir, FDC_DATA_DIR)
food = pd.read_csv(os.path.join(abs_fdc_data_dir, 'food.csv'))
nutrient = pd.read_csv(os.path.join(
abs_fdc_data_dir, 'nutrient.csv')).rename(columns={'id': 'nutrient_id'})
food_nutrient = pd.read_csv(
os.path.join(abs_fdc_data_dir, 'food_nutrient.csv'))
combined = lib.combine_dataframes('fdc_id', food, food_nutrient)
combined = lib.combine_dataframes('nutrient_id', combined, nutrient)
calorie_data = lib.get_calorie_data(combined, 'kcal')
calorie_data = calorie_data[[
'description', 'data_type', 'name', 'amount', 'unit_name'
]] # Keep only relevant cols
calorie_data = lib.clean_data(calorie_data)
# Count rows with low, avg, or high calorie labels
low_cal_cnt = 0
avg_cal_cnt = 0
hi_cal_cnt = 0
for _, row in calorie_data.iterrows():
cal = row['amount']
if cal < amaranth.LOW_CALORIE_THRESHOLD:
low_cal_cnt += 1
elif cal < amaranth.HIGH_CALORIE_THRESHOLD:
avg_cal_cnt += 1
else:
hi_cal_cnt += 1
print('Class balance in FDC Dataset:')
print(f'Low calorie: {low_cal_cnt/len(calorie_data)}')
print(f'Average calorie: {avg_cal_cnt/len(calorie_data)}')
print(f'High calorie: {hi_cal_cnt/len(calorie_data)}')
if __name__ == '__main__':
main()
|
Add class balance checking code
|
Add class balance checking code
|
Python
|
apache-2.0
|
googleinterns/amaranth,googleinterns/amaranth
|
Add class balance checking code
|
# Lint as: python3
"""This script checks the balance of classes in the FDC dataset.
Classes are split based on LOW_CALORIE_THRESHOLD and
HIGH_CALORIE_THRESHOLD in the amaranth module.
"""
import os
import pandas as pd
import amaranth
from amaranth.ml import lib
FDC_DATA_DIR = '../../data/fdc/'
def main():
# Read in calorie data
current_dir = os.path.dirname(__file__)
abs_fdc_data_dir = os.path.join(current_dir, FDC_DATA_DIR)
food = pd.read_csv(os.path.join(abs_fdc_data_dir, 'food.csv'))
nutrient = pd.read_csv(os.path.join(
abs_fdc_data_dir, 'nutrient.csv')).rename(columns={'id': 'nutrient_id'})
food_nutrient = pd.read_csv(
os.path.join(abs_fdc_data_dir, 'food_nutrient.csv'))
combined = lib.combine_dataframes('fdc_id', food, food_nutrient)
combined = lib.combine_dataframes('nutrient_id', combined, nutrient)
calorie_data = lib.get_calorie_data(combined, 'kcal')
calorie_data = calorie_data[[
'description', 'data_type', 'name', 'amount', 'unit_name'
]] # Keep only relevant cols
calorie_data = lib.clean_data(calorie_data)
# Count rows with low, avg, or high calorie labels
low_cal_cnt = 0
avg_cal_cnt = 0
hi_cal_cnt = 0
for _, row in calorie_data.iterrows():
cal = row['amount']
if cal < amaranth.LOW_CALORIE_THRESHOLD:
low_cal_cnt += 1
elif cal < amaranth.HIGH_CALORIE_THRESHOLD:
avg_cal_cnt += 1
else:
hi_cal_cnt += 1
print('Class balance in FDC Dataset:')
print(f'Low calorie: {low_cal_cnt/len(calorie_data)}')
print(f'Average calorie: {avg_cal_cnt/len(calorie_data)}')
print(f'High calorie: {hi_cal_cnt/len(calorie_data)}')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add class balance checking code<commit_after>
|
# Lint as: python3
"""This script checks the balance of classes in the FDC dataset.
Classes are split based on LOW_CALORIE_THRESHOLD and
HIGH_CALORIE_THRESHOLD in the amaranth module.
"""
import os
import pandas as pd
import amaranth
from amaranth.ml import lib
FDC_DATA_DIR = '../../data/fdc/'
def main():
# Read in calorie data
current_dir = os.path.dirname(__file__)
abs_fdc_data_dir = os.path.join(current_dir, FDC_DATA_DIR)
food = pd.read_csv(os.path.join(abs_fdc_data_dir, 'food.csv'))
nutrient = pd.read_csv(os.path.join(
abs_fdc_data_dir, 'nutrient.csv')).rename(columns={'id': 'nutrient_id'})
food_nutrient = pd.read_csv(
os.path.join(abs_fdc_data_dir, 'food_nutrient.csv'))
combined = lib.combine_dataframes('fdc_id', food, food_nutrient)
combined = lib.combine_dataframes('nutrient_id', combined, nutrient)
calorie_data = lib.get_calorie_data(combined, 'kcal')
calorie_data = calorie_data[[
'description', 'data_type', 'name', 'amount', 'unit_name'
]] # Keep only relevant cols
calorie_data = lib.clean_data(calorie_data)
# Count rows with low, avg, or high calorie labels
low_cal_cnt = 0
avg_cal_cnt = 0
hi_cal_cnt = 0
for _, row in calorie_data.iterrows():
cal = row['amount']
if cal < amaranth.LOW_CALORIE_THRESHOLD:
low_cal_cnt += 1
elif cal < amaranth.HIGH_CALORIE_THRESHOLD:
avg_cal_cnt += 1
else:
hi_cal_cnt += 1
print('Class balance in FDC Dataset:')
print(f'Low calorie: {low_cal_cnt/len(calorie_data)}')
print(f'Average calorie: {avg_cal_cnt/len(calorie_data)}')
print(f'High calorie: {hi_cal_cnt/len(calorie_data)}')
if __name__ == '__main__':
main()
|
Add class balance checking code# Lint as: python3
"""This script checks the balance of classes in the FDC dataset.
Classes are split based on LOW_CALORIE_THRESHOLD and
HIGH_CALORIE_THRESHOLD in the amaranth module.
"""
import os
import pandas as pd
import amaranth
from amaranth.ml import lib
FDC_DATA_DIR = '../../data/fdc/'
def main():
# Read in calorie data
current_dir = os.path.dirname(__file__)
abs_fdc_data_dir = os.path.join(current_dir, FDC_DATA_DIR)
food = pd.read_csv(os.path.join(abs_fdc_data_dir, 'food.csv'))
nutrient = pd.read_csv(os.path.join(
abs_fdc_data_dir, 'nutrient.csv')).rename(columns={'id': 'nutrient_id'})
food_nutrient = pd.read_csv(
os.path.join(abs_fdc_data_dir, 'food_nutrient.csv'))
combined = lib.combine_dataframes('fdc_id', food, food_nutrient)
combined = lib.combine_dataframes('nutrient_id', combined, nutrient)
calorie_data = lib.get_calorie_data(combined, 'kcal')
calorie_data = calorie_data[[
'description', 'data_type', 'name', 'amount', 'unit_name'
]] # Keep only relevant cols
calorie_data = lib.clean_data(calorie_data)
# Count rows with low, avg, or high calorie labels
low_cal_cnt = 0
avg_cal_cnt = 0
hi_cal_cnt = 0
for _, row in calorie_data.iterrows():
cal = row['amount']
if cal < amaranth.LOW_CALORIE_THRESHOLD:
low_cal_cnt += 1
elif cal < amaranth.HIGH_CALORIE_THRESHOLD:
avg_cal_cnt += 1
else:
hi_cal_cnt += 1
print('Class balance in FDC Dataset:')
print(f'Low calorie: {low_cal_cnt/len(calorie_data)}')
print(f'Average calorie: {avg_cal_cnt/len(calorie_data)}')
print(f'High calorie: {hi_cal_cnt/len(calorie_data)}')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add class balance checking code<commit_after># Lint as: python3
"""This script checks the balance of classes in the FDC dataset.
Classes are split based on LOW_CALORIE_THRESHOLD and
HIGH_CALORIE_THRESHOLD in the amaranth module.
"""
import os
import pandas as pd
import amaranth
from amaranth.ml import lib
FDC_DATA_DIR = '../../data/fdc/'
def main():
# Read in calorie data
current_dir = os.path.dirname(__file__)
abs_fdc_data_dir = os.path.join(current_dir, FDC_DATA_DIR)
food = pd.read_csv(os.path.join(abs_fdc_data_dir, 'food.csv'))
nutrient = pd.read_csv(os.path.join(
abs_fdc_data_dir, 'nutrient.csv')).rename(columns={'id': 'nutrient_id'})
food_nutrient = pd.read_csv(
os.path.join(abs_fdc_data_dir, 'food_nutrient.csv'))
combined = lib.combine_dataframes('fdc_id', food, food_nutrient)
combined = lib.combine_dataframes('nutrient_id', combined, nutrient)
calorie_data = lib.get_calorie_data(combined, 'kcal')
calorie_data = calorie_data[[
'description', 'data_type', 'name', 'amount', 'unit_name'
]] # Keep only relevant cols
calorie_data = lib.clean_data(calorie_data)
# Count rows with low, avg, or high calorie labels
low_cal_cnt = 0
avg_cal_cnt = 0
hi_cal_cnt = 0
for _, row in calorie_data.iterrows():
cal = row['amount']
if cal < amaranth.LOW_CALORIE_THRESHOLD:
low_cal_cnt += 1
elif cal < amaranth.HIGH_CALORIE_THRESHOLD:
avg_cal_cnt += 1
else:
hi_cal_cnt += 1
print('Class balance in FDC Dataset:')
print(f'Low calorie: {low_cal_cnt/len(calorie_data)}')
print(f'Average calorie: {avg_cal_cnt/len(calorie_data)}')
print(f'High calorie: {hi_cal_cnt/len(calorie_data)}')
if __name__ == '__main__':
main()
|
|
93b2d93098c395d866f18e51b6ac42a9ba81a9b5
|
exp/modelselect/RealDataSVMExp.py
|
exp/modelselect/RealDataSVMExp.py
|
"""
Observe if C varies when we use more examples
"""
import logging
import numpy
import sys
import multiprocessing
from apgl.util.PathDefaults import PathDefaults
from apgl.predictors.AbstractPredictor import computeTestError
from exp.modelselect.ModelSelectUtils import ModelSelectUtils
from apgl.util.Sampling import Sampling
from apgl.predictors.LibSVM import LibSVM
import matplotlib.pyplot as plt
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.seterr(all="raise")
numpy.random.seed(45)
dataDir = PathDefaults.getDataDir()
dataDir += "modelPenalisation/regression/"
outputDir = PathDefaults.getOutputDir() + "modelPenalisation/regression/CART/"
figInd = 0
loadMethod = ModelSelectUtils.loadRegressDataset
datasets = ModelSelectUtils.getRegressionDatasets(True)
datasetName = datasets[9][0]
#sampleSizes = numpy.array([50, 100, 200])
sampleSizes = numpy.array([50, 100, 200])
foldsSet = numpy.arange(2, 13, 1)
alpha = 1.0
paramDict = {}
paramDict["setC"] = 2.0**numpy.arange(-10, 14, 2, dtype=numpy.float)
paramDict["setGamma"] = 2.0**numpy.arange(-10, 4, 2, dtype=numpy.float)
paramDict["setEpsilon"] = numpy.array([2**-2])
sampleMethod = Sampling.crossValidation
numProcesses = multiprocessing.cpu_count()
j = 0
trainX, trainY, testX, testY = ModelSelectUtils.loadRegressDataset(dataDir, datasetName, j)
learner = LibSVM(kernel='gaussian', type="Epsilon_SVR", processes=numProcesses)
for sampleSize in sampleSizes:
print("Sample size " +str(sampleSize))
trainInds = numpy.random.permutation(trainX.shape[0])[0:sampleSize]
validX = trainX[trainInds,:]
validY = trainY[trainInds]
folds = 5
idx = sampleMethod(folds, sampleSize)
meanErrors = learner.parallelPenaltyGrid(validX, validY, testX, testY, paramDict, computeTestError)
meanErrors = numpy.squeeze(meanErrors)
for i in range(paramDict["setGamma"].shape[0]):
plt.figure(i)
plt.plot(numpy.arange(paramDict["setC"].shape[0]), meanErrors[i, :], label=str(sampleSize))
plt.legend(loc="upper left")
plt.xlabel("C")
plt.ylabel("Error")
plt.show()
|
Test if C changes with more examples.
|
Test if C changes with more examples.
|
Python
|
bsd-3-clause
|
charanpald/APGL
|
Test if C changes with more examples.
|
"""
Observe if C varies when we use more examples
"""
import logging
import numpy
import sys
import multiprocessing
from apgl.util.PathDefaults import PathDefaults
from apgl.predictors.AbstractPredictor import computeTestError
from exp.modelselect.ModelSelectUtils import ModelSelectUtils
from apgl.util.Sampling import Sampling
from apgl.predictors.LibSVM import LibSVM
import matplotlib.pyplot as plt
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.seterr(all="raise")
numpy.random.seed(45)
dataDir = PathDefaults.getDataDir()
dataDir += "modelPenalisation/regression/"
outputDir = PathDefaults.getOutputDir() + "modelPenalisation/regression/CART/"
figInd = 0
loadMethod = ModelSelectUtils.loadRegressDataset
datasets = ModelSelectUtils.getRegressionDatasets(True)
datasetName = datasets[9][0]
#sampleSizes = numpy.array([50, 100, 200])
sampleSizes = numpy.array([50, 100, 200])
foldsSet = numpy.arange(2, 13, 1)
alpha = 1.0
paramDict = {}
paramDict["setC"] = 2.0**numpy.arange(-10, 14, 2, dtype=numpy.float)
paramDict["setGamma"] = 2.0**numpy.arange(-10, 4, 2, dtype=numpy.float)
paramDict["setEpsilon"] = numpy.array([2**-2])
sampleMethod = Sampling.crossValidation
numProcesses = multiprocessing.cpu_count()
j = 0
trainX, trainY, testX, testY = ModelSelectUtils.loadRegressDataset(dataDir, datasetName, j)
learner = LibSVM(kernel='gaussian', type="Epsilon_SVR", processes=numProcesses)
for sampleSize in sampleSizes:
print("Sample size " +str(sampleSize))
trainInds = numpy.random.permutation(trainX.shape[0])[0:sampleSize]
validX = trainX[trainInds,:]
validY = trainY[trainInds]
folds = 5
idx = sampleMethod(folds, sampleSize)
meanErrors = learner.parallelPenaltyGrid(validX, validY, testX, testY, paramDict, computeTestError)
meanErrors = numpy.squeeze(meanErrors)
for i in range(paramDict["setGamma"].shape[0]):
plt.figure(i)
plt.plot(numpy.arange(paramDict["setC"].shape[0]), meanErrors[i, :], label=str(sampleSize))
plt.legend(loc="upper left")
plt.xlabel("C")
plt.ylabel("Error")
plt.show()
|
<commit_before><commit_msg>Test if C changes with more examples. <commit_after>
|
"""
Observe if C varies when we use more examples
"""
import logging
import numpy
import sys
import multiprocessing
from apgl.util.PathDefaults import PathDefaults
from apgl.predictors.AbstractPredictor import computeTestError
from exp.modelselect.ModelSelectUtils import ModelSelectUtils
from apgl.util.Sampling import Sampling
from apgl.predictors.LibSVM import LibSVM
import matplotlib.pyplot as plt
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.seterr(all="raise")
numpy.random.seed(45)
dataDir = PathDefaults.getDataDir()
dataDir += "modelPenalisation/regression/"
outputDir = PathDefaults.getOutputDir() + "modelPenalisation/regression/CART/"
figInd = 0
loadMethod = ModelSelectUtils.loadRegressDataset
datasets = ModelSelectUtils.getRegressionDatasets(True)
datasetName = datasets[9][0]
#sampleSizes = numpy.array([50, 100, 200])
sampleSizes = numpy.array([50, 100, 200])
foldsSet = numpy.arange(2, 13, 1)
alpha = 1.0
paramDict = {}
paramDict["setC"] = 2.0**numpy.arange(-10, 14, 2, dtype=numpy.float)
paramDict["setGamma"] = 2.0**numpy.arange(-10, 4, 2, dtype=numpy.float)
paramDict["setEpsilon"] = numpy.array([2**-2])
sampleMethod = Sampling.crossValidation
numProcesses = multiprocessing.cpu_count()
j = 0
trainX, trainY, testX, testY = ModelSelectUtils.loadRegressDataset(dataDir, datasetName, j)
learner = LibSVM(kernel='gaussian', type="Epsilon_SVR", processes=numProcesses)
for sampleSize in sampleSizes:
print("Sample size " +str(sampleSize))
trainInds = numpy.random.permutation(trainX.shape[0])[0:sampleSize]
validX = trainX[trainInds,:]
validY = trainY[trainInds]
folds = 5
idx = sampleMethod(folds, sampleSize)
meanErrors = learner.parallelPenaltyGrid(validX, validY, testX, testY, paramDict, computeTestError)
meanErrors = numpy.squeeze(meanErrors)
for i in range(paramDict["setGamma"].shape[0]):
plt.figure(i)
plt.plot(numpy.arange(paramDict["setC"].shape[0]), meanErrors[i, :], label=str(sampleSize))
plt.legend(loc="upper left")
plt.xlabel("C")
plt.ylabel("Error")
plt.show()
|
Test if C changes with more examples. """
Observe if C varies when we use more examples
"""
import logging
import numpy
import sys
import multiprocessing
from apgl.util.PathDefaults import PathDefaults
from apgl.predictors.AbstractPredictor import computeTestError
from exp.modelselect.ModelSelectUtils import ModelSelectUtils
from apgl.util.Sampling import Sampling
from apgl.predictors.LibSVM import LibSVM
import matplotlib.pyplot as plt
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.seterr(all="raise")
numpy.random.seed(45)
dataDir = PathDefaults.getDataDir()
dataDir += "modelPenalisation/regression/"
outputDir = PathDefaults.getOutputDir() + "modelPenalisation/regression/CART/"
figInd = 0
loadMethod = ModelSelectUtils.loadRegressDataset
datasets = ModelSelectUtils.getRegressionDatasets(True)
datasetName = datasets[9][0]
#sampleSizes = numpy.array([50, 100, 200])
sampleSizes = numpy.array([50, 100, 200])
foldsSet = numpy.arange(2, 13, 1)
alpha = 1.0
paramDict = {}
paramDict["setC"] = 2.0**numpy.arange(-10, 14, 2, dtype=numpy.float)
paramDict["setGamma"] = 2.0**numpy.arange(-10, 4, 2, dtype=numpy.float)
paramDict["setEpsilon"] = numpy.array([2**-2])
sampleMethod = Sampling.crossValidation
numProcesses = multiprocessing.cpu_count()
j = 0
trainX, trainY, testX, testY = ModelSelectUtils.loadRegressDataset(dataDir, datasetName, j)
learner = LibSVM(kernel='gaussian', type="Epsilon_SVR", processes=numProcesses)
for sampleSize in sampleSizes:
print("Sample size " +str(sampleSize))
trainInds = numpy.random.permutation(trainX.shape[0])[0:sampleSize]
validX = trainX[trainInds,:]
validY = trainY[trainInds]
folds = 5
idx = sampleMethod(folds, sampleSize)
meanErrors = learner.parallelPenaltyGrid(validX, validY, testX, testY, paramDict, computeTestError)
meanErrors = numpy.squeeze(meanErrors)
for i in range(paramDict["setGamma"].shape[0]):
plt.figure(i)
plt.plot(numpy.arange(paramDict["setC"].shape[0]), meanErrors[i, :], label=str(sampleSize))
plt.legend(loc="upper left")
plt.xlabel("C")
plt.ylabel("Error")
plt.show()
|
<commit_before><commit_msg>Test if C changes with more examples. <commit_after>"""
Observe if C varies when we use more examples
"""
import logging
import numpy
import sys
import multiprocessing
from apgl.util.PathDefaults import PathDefaults
from apgl.predictors.AbstractPredictor import computeTestError
from exp.modelselect.ModelSelectUtils import ModelSelectUtils
from apgl.util.Sampling import Sampling
from apgl.predictors.LibSVM import LibSVM
import matplotlib.pyplot as plt
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
numpy.seterr(all="raise")
numpy.random.seed(45)
dataDir = PathDefaults.getDataDir()
dataDir += "modelPenalisation/regression/"
outputDir = PathDefaults.getOutputDir() + "modelPenalisation/regression/CART/"
figInd = 0
loadMethod = ModelSelectUtils.loadRegressDataset
datasets = ModelSelectUtils.getRegressionDatasets(True)
datasetName = datasets[9][0]
#sampleSizes = numpy.array([50, 100, 200])
sampleSizes = numpy.array([50, 100, 200])
foldsSet = numpy.arange(2, 13, 1)
alpha = 1.0
paramDict = {}
paramDict["setC"] = 2.0**numpy.arange(-10, 14, 2, dtype=numpy.float)
paramDict["setGamma"] = 2.0**numpy.arange(-10, 4, 2, dtype=numpy.float)
paramDict["setEpsilon"] = numpy.array([2**-2])
sampleMethod = Sampling.crossValidation
numProcesses = multiprocessing.cpu_count()
j = 0
trainX, trainY, testX, testY = ModelSelectUtils.loadRegressDataset(dataDir, datasetName, j)
learner = LibSVM(kernel='gaussian', type="Epsilon_SVR", processes=numProcesses)
for sampleSize in sampleSizes:
print("Sample size " +str(sampleSize))
trainInds = numpy.random.permutation(trainX.shape[0])[0:sampleSize]
validX = trainX[trainInds,:]
validY = trainY[trainInds]
folds = 5
idx = sampleMethod(folds, sampleSize)
meanErrors = learner.parallelPenaltyGrid(validX, validY, testX, testY, paramDict, computeTestError)
meanErrors = numpy.squeeze(meanErrors)
for i in range(paramDict["setGamma"].shape[0]):
plt.figure(i)
plt.plot(numpy.arange(paramDict["setC"].shape[0]), meanErrors[i, :], label=str(sampleSize))
plt.legend(loc="upper left")
plt.xlabel("C")
plt.ylabel("Error")
plt.show()
|
|
7997d02e52172b8ad0e96a845f953f90a6e739b7
|
scripts/examples/02-Board-Control/vsync_gpio_output.py
|
scripts/examples/02-Board-Control/vsync_gpio_output.py
|
# VSYNC GPIO output example.
#
# This example shows how to toggle the IR LED pin on VSYNC interrupt.
import sensor, image, time
from pyb import Pin
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
# IR LED pin object
ir_led_pin = Pin('LED_IR', Pin.OUT_PP, Pin.PULL_NONE)
# This pin will be toggled on/off on VSYNC (start of frame) interrupt.
sensor.set_vsync_output(ir_led_pin)
clock = time.clock() # Create a clock object to track the FPS.
while(True):
clock.tick() # Update the FPS clock.
img = sensor.snapshot() # Take a picture and return the image.
# Turn off the IR LED after snapshot.
ir_led_pin.off()
print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
# to the IDE. The FPS should increase once disconnected.
|
Add VSYNC GPIO output example.
|
Add VSYNC GPIO output example.
|
Python
|
mit
|
iabdalkader/openmv,kwagyeman/openmv,openmv/openmv,kwagyeman/openmv,kwagyeman/openmv,iabdalkader/openmv,openmv/openmv,openmv/openmv,iabdalkader/openmv,kwagyeman/openmv,openmv/openmv,iabdalkader/openmv
|
Add VSYNC GPIO output example.
|
# VSYNC GPIO output example.
#
# This example shows how to toggle the IR LED pin on VSYNC interrupt.
import sensor, image, time
from pyb import Pin
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
# IR LED pin object
ir_led_pin = Pin('LED_IR', Pin.OUT_PP, Pin.PULL_NONE)
# This pin will be toggled on/off on VSYNC (start of frame) interrupt.
sensor.set_vsync_output(ir_led_pin)
clock = time.clock() # Create a clock object to track the FPS.
while(True):
clock.tick() # Update the FPS clock.
img = sensor.snapshot() # Take a picture and return the image.
# Turn off the IR LED after snapshot.
ir_led_pin.off()
print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
# to the IDE. The FPS should increase once disconnected.
|
<commit_before><commit_msg>Add VSYNC GPIO output example.<commit_after>
|
# VSYNC GPIO output example.
#
# This example shows how to toggle the IR LED pin on VSYNC interrupt.
import sensor, image, time
from pyb import Pin
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
# IR LED pin object
ir_led_pin = Pin('LED_IR', Pin.OUT_PP, Pin.PULL_NONE)
# This pin will be toggled on/off on VSYNC (start of frame) interrupt.
sensor.set_vsync_output(ir_led_pin)
clock = time.clock() # Create a clock object to track the FPS.
while(True):
clock.tick() # Update the FPS clock.
img = sensor.snapshot() # Take a picture and return the image.
# Turn off the IR LED after snapshot.
ir_led_pin.off()
print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
# to the IDE. The FPS should increase once disconnected.
|
Add VSYNC GPIO output example.# VSYNC GPIO output example.
#
# This example shows how to toggle the IR LED pin on VSYNC interrupt.
import sensor, image, time
from pyb import Pin
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
# IR LED pin object
ir_led_pin = Pin('LED_IR', Pin.OUT_PP, Pin.PULL_NONE)
# This pin will be toggled on/off on VSYNC (start of frame) interrupt.
sensor.set_vsync_output(ir_led_pin)
clock = time.clock() # Create a clock object to track the FPS.
while(True):
clock.tick() # Update the FPS clock.
img = sensor.snapshot() # Take a picture and return the image.
# Turn off the IR LED after snapshot.
ir_led_pin.off()
print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
# to the IDE. The FPS should increase once disconnected.
|
<commit_before><commit_msg>Add VSYNC GPIO output example.<commit_after># VSYNC GPIO output example.
#
# This example shows how to toggle the IR LED pin on VSYNC interrupt.
import sensor, image, time
from pyb import Pin
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000)     # Wait for settings to take effect.
# IR LED pin object
ir_led_pin = Pin('LED_IR', Pin.OUT_PP, Pin.PULL_NONE)
# This pin will be toggled on/off on VSYNC (start of frame) interrupt.
sensor.set_vsync_output(ir_led_pin)
clock = time.clock() # Create a clock object to track the FPS.
while(True):
clock.tick() # Update the FPS clock.
img = sensor.snapshot() # Take a picture and return the image.
# Turn off the IR LED after snapshot.
ir_led_pin.off()
print(clock.fps()) # Note: OpenMV Cam runs about half as fast when connected
# to the IDE. The FPS should increase once disconnected.
|
|
2585b44484b175bb116c228496069cc4269440c0
|
hoomd/md/test-py/test_angle_cosinesq.py
|
hoomd/md/test-py/test_angle_cosinesq.py
|
# -*- coding: iso-8859-1 -*-
# Maintainer: joaander
from hoomd import *
from hoomd import md
context.initialize()
import unittest
import os
import numpy
# tests md.angle.cosinesq
class angle_cosinesq_tests (unittest.TestCase):
def setUp(self):
print
snap = data.make_snapshot(N=40,
box=data.boxdim(L=100),
particle_types = ['A'],
bond_types = [],
angle_types = ['angleA'],
dihedral_types = [],
improper_types = [])
if comm.get_rank() == 0:
snap.angles.resize(10);
for i in range(10):
x = numpy.array([i, 0, 0], dtype=numpy.float32)
snap.particles.position[4*i+0,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+1,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+2,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+3,:] = x;
snap.angles.group[i,:] = [4*i+0, 4*i+1, 4*i+2];
init.read_snapshot(snap)
context.current.sorter.set_params(grid=8)
    # test to see that we can create an md.angle.cosinesq
def test_create(self):
md.angle.cosinesq();
# test setting coefficients
def test_set_coeff(self):
cosinesq = md.angle.cosinesq();
cosinesq.angle_coeff.set('angleA', k=1.0, t0=0.78125)
all = group.all();
md.integrate.mode_standard(dt=0.005);
md.integrate.nve(all);
run(100);
# test coefficient not set checking
def test_set_coeff_fail(self):
cosinesq = md.angle.harmonic();
all = group.all();
md.integrate.mode_standard(dt=0.005);
md.integrate.nve(all);
self.assertRaises(RuntimeError, run, 100);
def tearDown(self):
context.initialize();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
Add python tests for cosine squared angles
|
Add python tests for cosine squared angles
|
Python
|
bsd-3-clause
|
joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue
|
Add python tests for cosine squared angles
|
# -*- coding: iso-8859-1 -*-
# Maintainer: joaander
from hoomd import *
from hoomd import md
context.initialize()
import unittest
import os
import numpy
# tests md.angle.cosinesq
class angle_cosinesq_tests (unittest.TestCase):
def setUp(self):
print
snap = data.make_snapshot(N=40,
box=data.boxdim(L=100),
particle_types = ['A'],
bond_types = [],
angle_types = ['angleA'],
dihedral_types = [],
improper_types = [])
if comm.get_rank() == 0:
snap.angles.resize(10);
for i in range(10):
x = numpy.array([i, 0, 0], dtype=numpy.float32)
snap.particles.position[4*i+0,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+1,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+2,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+3,:] = x;
snap.angles.group[i,:] = [4*i+0, 4*i+1, 4*i+2];
init.read_snapshot(snap)
context.current.sorter.set_params(grid=8)
    # test to see that we can create an md.angle.cosinesq
def test_create(self):
md.angle.cosinesq();
# test setting coefficients
def test_set_coeff(self):
cosinesq = md.angle.cosinesq();
cosinesq.angle_coeff.set('angleA', k=1.0, t0=0.78125)
all = group.all();
md.integrate.mode_standard(dt=0.005);
md.integrate.nve(all);
run(100);
# test coefficient not set checking
def test_set_coeff_fail(self):
cosinesq = md.angle.harmonic();
all = group.all();
md.integrate.mode_standard(dt=0.005);
md.integrate.nve(all);
self.assertRaises(RuntimeError, run, 100);
def tearDown(self):
context.initialize();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
<commit_before><commit_msg>Add python tests for cosine squared angles<commit_after>
|
# -*- coding: iso-8859-1 -*-
# Maintainer: joaander
from hoomd import *
from hoomd import md
context.initialize()
import unittest
import os
import numpy
# tests md.angle.cosinesq
class angle_cosinesq_tests (unittest.TestCase):
def setUp(self):
print
snap = data.make_snapshot(N=40,
box=data.boxdim(L=100),
particle_types = ['A'],
bond_types = [],
angle_types = ['angleA'],
dihedral_types = [],
improper_types = [])
if comm.get_rank() == 0:
snap.angles.resize(10);
for i in range(10):
x = numpy.array([i, 0, 0], dtype=numpy.float32)
snap.particles.position[4*i+0,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+1,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+2,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+3,:] = x;
snap.angles.group[i,:] = [4*i+0, 4*i+1, 4*i+2];
init.read_snapshot(snap)
context.current.sorter.set_params(grid=8)
    # test to see that we can create an md.angle.cosinesq
def test_create(self):
md.angle.cosinesq();
# test setting coefficients
def test_set_coeff(self):
cosinesq = md.angle.cosinesq();
cosinesq.angle_coeff.set('angleA', k=1.0, t0=0.78125)
all = group.all();
md.integrate.mode_standard(dt=0.005);
md.integrate.nve(all);
run(100);
# test coefficient not set checking
def test_set_coeff_fail(self):
cosinesq = md.angle.harmonic();
all = group.all();
md.integrate.mode_standard(dt=0.005);
md.integrate.nve(all);
self.assertRaises(RuntimeError, run, 100);
def tearDown(self):
context.initialize();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
Add python tests for cosine squared angles# -*- coding: iso-8859-1 -*-
# Maintainer: joaander
from hoomd import *
from hoomd import md
context.initialize()
import unittest
import os
import numpy
# tests md.angle.cosinesq
class angle_cosinesq_tests (unittest.TestCase):
def setUp(self):
print
snap = data.make_snapshot(N=40,
box=data.boxdim(L=100),
particle_types = ['A'],
bond_types = [],
angle_types = ['angleA'],
dihedral_types = [],
improper_types = [])
if comm.get_rank() == 0:
snap.angles.resize(10);
for i in range(10):
x = numpy.array([i, 0, 0], dtype=numpy.float32)
snap.particles.position[4*i+0,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+1,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+2,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+3,:] = x;
snap.angles.group[i,:] = [4*i+0, 4*i+1, 4*i+2];
init.read_snapshot(snap)
context.current.sorter.set_params(grid=8)
    # test to see that we can create an md.angle.cosinesq
def test_create(self):
md.angle.cosinesq();
# test setting coefficients
def test_set_coeff(self):
cosinesq = md.angle.cosinesq();
cosinesq.angle_coeff.set('angleA', k=1.0, t0=0.78125)
all = group.all();
md.integrate.mode_standard(dt=0.005);
md.integrate.nve(all);
run(100);
# test coefficient not set checking
def test_set_coeff_fail(self):
cosinesq = md.angle.harmonic();
all = group.all();
md.integrate.mode_standard(dt=0.005);
md.integrate.nve(all);
self.assertRaises(RuntimeError, run, 100);
def tearDown(self):
context.initialize();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
<commit_before><commit_msg>Add python tests for cosine squared angles<commit_after># -*- coding: iso-8859-1 -*-
# Maintainer: joaander
from hoomd import *
from hoomd import md
context.initialize()
import unittest
import os
import numpy
# tests md.angle.cosinesq
class angle_cosinesq_tests (unittest.TestCase):
def setUp(self):
print
snap = data.make_snapshot(N=40,
box=data.boxdim(L=100),
particle_types = ['A'],
bond_types = [],
angle_types = ['angleA'],
dihedral_types = [],
improper_types = [])
if comm.get_rank() == 0:
snap.angles.resize(10);
for i in range(10):
x = numpy.array([i, 0, 0], dtype=numpy.float32)
snap.particles.position[4*i+0,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+1,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+2,:] = x;
x += numpy.random.random(3)
snap.particles.position[4*i+3,:] = x;
snap.angles.group[i,:] = [4*i+0, 4*i+1, 4*i+2];
init.read_snapshot(snap)
context.current.sorter.set_params(grid=8)
    # test to see that we can create an md.angle.cosinesq
def test_create(self):
md.angle.cosinesq();
# test setting coefficients
def test_set_coeff(self):
cosinesq = md.angle.cosinesq();
cosinesq.angle_coeff.set('angleA', k=1.0, t0=0.78125)
all = group.all();
md.integrate.mode_standard(dt=0.005);
md.integrate.nve(all);
run(100);
# test coefficient not set checking
def test_set_coeff_fail(self):
cosinesq = md.angle.harmonic();
all = group.all();
md.integrate.mode_standard(dt=0.005);
md.integrate.nve(all);
self.assertRaises(RuntimeError, run, 100);
def tearDown(self):
context.initialize();
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
|
|
d168256dd4b75375770b3391f716ceaba2cf722e
|
cpsScrap.py
|
cpsScrap.py
|
#user/local/bin/python
#uses python3
import urllib.request
from bs4 import BeautifulSoup
url = "http://www.bls.gov/cps/cpsaat01.htm" #access the search term through website
page = urllib.request.urlopen(url).read()
soup = BeautifulSoup(page)
tables = soup.findAll('table') #find all tables
#print(tables)
mainTable = soup.find(id="cps_eeann_year")
print(mainTable)
for table in tables:
caption = table.find('caption')
print(caption)
data = [] #create holder for results
rows = mainTable.findAll('tr')
print(rows)
for row in rows[1:]:
dataRow = [] #create smaller list for each row
for th in row.findAll('th'):
dataRow.append(th.text)
for td in row.findAll('td'):
dataRow.append(td.text)
data.append(dataRow)
data.pop()
print(data)
|
Add scraper of Bureau of Labor Statistics Employment status
|
Add scraper of Bureau of Labor Statistics Employment status
|
Python
|
mit
|
lexieheinle/python-productivity
|
Add scraper of Bureau of Labor Statistics Employment status
|
#user/local/bin/python
#uses python3
import urllib.request
from bs4 import BeautifulSoup
url = "http://www.bls.gov/cps/cpsaat01.htm" #access the search term through website
page = urllib.request.urlopen(url).read()
soup = BeautifulSoup(page)
tables = soup.findAll('table') #find all tables
#print(tables)
mainTable = soup.find(id="cps_eeann_year")
print(mainTable)
for table in tables:
caption = table.find('caption')
print(caption)
data = [] #create holder for results
rows = mainTable.findAll('tr')
print(rows)
for row in rows[1:]:
dataRow = [] #create smaller list for each row
for th in row.findAll('th'):
dataRow.append(th.text)
for td in row.findAll('td'):
dataRow.append(td.text)
data.append(dataRow)
data.pop()
print(data)
|
<commit_before><commit_msg>Add scraper of Bureau of Labor Statistics Employment status<commit_after>
|
#user/local/bin/python
#uses python3
import urllib.request
from bs4 import BeautifulSoup
url = "http://www.bls.gov/cps/cpsaat01.htm" #access the search term through website
page = urllib.request.urlopen(url).read()
soup = BeautifulSoup(page)
tables = soup.findAll('table') #find all tables
#print(tables)
mainTable = soup.find(id="cps_eeann_year")
print(mainTable)
for table in tables:
caption = table.find('caption')
print(caption)
data = [] #create holder for results
rows = mainTable.findAll('tr')
print(rows)
for row in rows[1:]:
dataRow = [] #create smaller list for each row
for th in row.findAll('th'):
dataRow.append(th.text)
for td in row.findAll('td'):
dataRow.append(td.text)
data.append(dataRow)
data.pop()
print(data)
|
Add scraper of Bureau of Labor Statistics Employment status#user/local/bin/python
#uses python3
import urllib.request
from bs4 import BeautifulSoup
url = "http://www.bls.gov/cps/cpsaat01.htm" #access the search term through website
page = urllib.request.urlopen(url).read()
soup = BeautifulSoup(page)
tables = soup.findAll('table') #find all tables
#print(tables)
mainTable = soup.find(id="cps_eeann_year")
print(mainTable)
for table in tables:
caption = table.find('caption')
print(caption)
data = [] #create holder for results
rows = mainTable.findAll('tr')
print(rows)
for row in rows[1:]:
dataRow = [] #create smaller list for each row
for th in row.findAll('th'):
dataRow.append(th.text)
for td in row.findAll('td'):
dataRow.append(td.text)
data.append(dataRow)
data.pop()
print(data)
|
<commit_before><commit_msg>Add scraper of Bureau of Labor Statistics Employment status<commit_after>#user/local/bin/python
#uses python3
import urllib.request
from bs4 import BeautifulSoup
url = "http://www.bls.gov/cps/cpsaat01.htm" #access the search term through website
page = urllib.request.urlopen(url).read()
soup = BeautifulSoup(page)
tables = soup.findAll('table') #find all tables
#print(tables)
mainTable = soup.find(id="cps_eeann_year")
print(mainTable)
for table in tables:
caption = table.find('caption')
print(caption)
data = [] #create holder for results
rows = mainTable.findAll('tr')
print(rows)
for row in rows[1:]:
dataRow = [] #create smaller list for each row
for th in row.findAll('th'):
dataRow.append(th.text)
for td in row.findAll('td'):
dataRow.append(td.text)
data.append(dataRow)
data.pop()
print(data)
|
|
39de01462baf3db60c5a0f5d8a3b529f798730ab
|
pygraphc/bin/Check.py
|
pygraphc/bin/Check.py
|
import csv
from os import listdir
from pygraphc.evaluation.ExternalEvaluation import ExternalEvaluation
# read result and ground truth
result_dir = '/home/hudan/Git/pygraphc/result/improved_majorclust/Kippo/per_day/'
groundtruth_dir = '/home/hudan/Git/labeled-authlog/dataset/Kippo/attack/'
result_files = listdir(result_dir)
# open evaluation file
f = open('check.csv', 'wt')
writer = csv.writer(f)
# set header
header = ('file_name', 'tp', 'fp', 'fn', 'tn', 'specificity', 'precision', 'recall', 'accuracy')
writer.writerow(header)
for result_file in result_files:
if result_file.endswith('.anomaly.perline.txt'):
filename = result_file.split('.anomaly')[0]
print filename
groundtruth_file = groundtruth_dir + filename + '.attack'
# check confusion matrix
true_false, specificity, precision, recall, accuracy = \
ExternalEvaluation.get_confusion(groundtruth_file, result_dir + result_file)
# write evaluation result to file
row = (filename, true_false[0], true_false[1], true_false[2], true_false[3],
specificity, precision, recall, accuracy)
writer.writerow(row)
f.close()
|
Add script to check the performance
|
Add script to check the performance
|
Python
|
mit
|
studiawan/pygraphc
|
Add script to check the performance
|
import csv
from os import listdir
from pygraphc.evaluation.ExternalEvaluation import ExternalEvaluation
# read result and ground truth
result_dir = '/home/hudan/Git/pygraphc/result/improved_majorclust/Kippo/per_day/'
groundtruth_dir = '/home/hudan/Git/labeled-authlog/dataset/Kippo/attack/'
result_files = listdir(result_dir)
# open evaluation file
f = open('check.csv', 'wt')
writer = csv.writer(f)
# set header
header = ('file_name', 'tp', 'fp', 'fn', 'tn', 'specificity', 'precision', 'recall', 'accuracy')
writer.writerow(header)
for result_file in result_files:
if result_file.endswith('.anomaly.perline.txt'):
filename = result_file.split('.anomaly')[0]
print filename
groundtruth_file = groundtruth_dir + filename + '.attack'
# check confusion matrix
true_false, specificity, precision, recall, accuracy = \
ExternalEvaluation.get_confusion(groundtruth_file, result_dir + result_file)
# write evaluation result to file
row = (filename, true_false[0], true_false[1], true_false[2], true_false[3],
specificity, precision, recall, accuracy)
writer.writerow(row)
f.close()
|
<commit_before><commit_msg>Add script to check the performance<commit_after>
|
import csv
from os import listdir
from pygraphc.evaluation.ExternalEvaluation import ExternalEvaluation
# read result and ground truth
result_dir = '/home/hudan/Git/pygraphc/result/improved_majorclust/Kippo/per_day/'
groundtruth_dir = '/home/hudan/Git/labeled-authlog/dataset/Kippo/attack/'
result_files = listdir(result_dir)
# open evaluation file
f = open('check.csv', 'wt')
writer = csv.writer(f)
# set header
header = ('file_name', 'tp', 'fp', 'fn', 'tn', 'specificity', 'precision', 'recall', 'accuracy')
writer.writerow(header)
for result_file in result_files:
if result_file.endswith('.anomaly.perline.txt'):
filename = result_file.split('.anomaly')[0]
print filename
groundtruth_file = groundtruth_dir + filename + '.attack'
# check confusion matrix
true_false, specificity, precision, recall, accuracy = \
ExternalEvaluation.get_confusion(groundtruth_file, result_dir + result_file)
# write evaluation result to file
row = (filename, true_false[0], true_false[1], true_false[2], true_false[3],
specificity, precision, recall, accuracy)
writer.writerow(row)
f.close()
|
Add script to check the performanceimport csv
from os import listdir
from pygraphc.evaluation.ExternalEvaluation import ExternalEvaluation
# read result and ground truth
result_dir = '/home/hudan/Git/pygraphc/result/improved_majorclust/Kippo/per_day/'
groundtruth_dir = '/home/hudan/Git/labeled-authlog/dataset/Kippo/attack/'
result_files = listdir(result_dir)
# open evaluation file
f = open('check.csv', 'wt')
writer = csv.writer(f)
# set header
header = ('file_name', 'tp', 'fp', 'fn', 'tn', 'specificity', 'precision', 'recall', 'accuracy')
writer.writerow(header)
for result_file in result_files:
if result_file.endswith('.anomaly.perline.txt'):
filename = result_file.split('.anomaly')[0]
print filename
groundtruth_file = groundtruth_dir + filename + '.attack'
# check confusion matrix
true_false, specificity, precision, recall, accuracy = \
ExternalEvaluation.get_confusion(groundtruth_file, result_dir + result_file)
# write evaluation result to file
row = (filename, true_false[0], true_false[1], true_false[2], true_false[3],
specificity, precision, recall, accuracy)
writer.writerow(row)
f.close()
|
<commit_before><commit_msg>Add script to check the performance<commit_after>import csv
from os import listdir
from pygraphc.evaluation.ExternalEvaluation import ExternalEvaluation
# read result and ground truth
result_dir = '/home/hudan/Git/pygraphc/result/improved_majorclust/Kippo/per_day/'
groundtruth_dir = '/home/hudan/Git/labeled-authlog/dataset/Kippo/attack/'
result_files = listdir(result_dir)
# open evaluation file
f = open('check.csv', 'wt')
writer = csv.writer(f)
# set header
header = ('file_name', 'tp', 'fp', 'fn', 'tn', 'specificity', 'precision', 'recall', 'accuracy')
writer.writerow(header)
for result_file in result_files:
if result_file.endswith('.anomaly.perline.txt'):
filename = result_file.split('.anomaly')[0]
print filename
groundtruth_file = groundtruth_dir + filename + '.attack'
# check confusion matrix
true_false, specificity, precision, recall, accuracy = \
ExternalEvaluation.get_confusion(groundtruth_file, result_dir + result_file)
# write evaluation result to file
row = (filename, true_false[0], true_false[1], true_false[2], true_false[3],
specificity, precision, recall, accuracy)
writer.writerow(row)
f.close()
|
|
8da927a0a196301ce5fb2ef2224e556b4d414729
|
problem1.py
|
problem1.py
|
from collections import Counter
if __name__ == '__main__':
with open('data/rosalind_dna.txt', mode='r') as f:
sequence = f.read()
counts = Counter(sequence)
print '%d %d %d %d' % (counts['A'], counts['C'], counts['G'], counts['T'])
|
Add solution for counting DNA nucleotides
|
Add solution for counting DNA nucleotides
|
Python
|
mit
|
MichaelAquilina/rosalind-solutions
|
Add solution for counting DNA nucleotides
|
from collections import Counter
if __name__ == '__main__':
with open('data/rosalind_dna.txt', mode='r') as f:
sequence = f.read()
counts = Counter(sequence)
print '%d %d %d %d' % (counts['A'], counts['C'], counts['G'], counts['T'])
|
<commit_before><commit_msg>Add solution for counting DNA nucleotides<commit_after>
|
from collections import Counter
if __name__ == '__main__':
with open('data/rosalind_dna.txt', mode='r') as f:
sequence = f.read()
counts = Counter(sequence)
print '%d %d %d %d' % (counts['A'], counts['C'], counts['G'], counts['T'])
|
Add solution for counting DNA nucleotidesfrom collections import Counter
if __name__ == '__main__':
with open('data/rosalind_dna.txt', mode='r') as f:
sequence = f.read()
counts = Counter(sequence)
print '%d %d %d %d' % (counts['A'], counts['C'], counts['G'], counts['T'])
|
<commit_before><commit_msg>Add solution for counting DNA nucleotides<commit_after>from collections import Counter
if __name__ == '__main__':
with open('data/rosalind_dna.txt', mode='r') as f:
sequence = f.read()
counts = Counter(sequence)
print '%d %d %d %d' % (counts['A'], counts['C'], counts['G'], counts['T'])
|
|
5178b104993401f47b1c4d8e3c796bef379e389e
|
letsmeet/communities/migrations/0011_auto_20160318_2240.py
|
letsmeet/communities/migrations/0011_auto_20160318_2240.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-18 21:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('communities', '0010_auto_20160108_1618'),
]
operations = [
migrations.AlterModelManagers(
name='community',
managers=[
],
),
]
|
Add migration for `communities` app.
|
Add migration for `communities` app.
It was created independently of changes to `events` app; probably because of some old code changes in `communities` app.
|
Python
|
mit
|
letsmeet-click/letsmeet.click,letsmeet-click/letsmeet.click,letsmeet-click/letsmeet.click,letsmeet-click/letsmeet.click
|
Add migration for `communities` app.
It was created independently of changes to `events` app; probably because of some old code changes in `communities` app.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-18 21:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('communities', '0010_auto_20160108_1618'),
]
operations = [
migrations.AlterModelManagers(
name='community',
managers=[
],
),
]
|
<commit_before><commit_msg>Add migration for `communities` app.
It was created independently of changes to `events` app; probably because of some old code changes in `communities` app.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-18 21:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('communities', '0010_auto_20160108_1618'),
]
operations = [
migrations.AlterModelManagers(
name='community',
managers=[
],
),
]
|
Add migration for `communities` app.
It was created independently of changes to `events` app; probably because of some old code changes in `communities` app.# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-18 21:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('communities', '0010_auto_20160108_1618'),
]
operations = [
migrations.AlterModelManagers(
name='community',
managers=[
],
),
]
|
<commit_before><commit_msg>Add migration for `communities` app.
It was created independently of changes to `events` app; probably because of some old code changes in `communities` app.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-18 21:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('communities', '0010_auto_20160108_1618'),
]
operations = [
migrations.AlterModelManagers(
name='community',
managers=[
],
),
]
|
|
eee85e5157d69cee515c01fa0f638b064de74a6e
|
script/graph-reports-by-transport-mode.py
|
script/graph-reports-by-transport-mode.py
|
#!/usr/bin/python
# A script to draw graphs showing the number of reports by transport
# type each month. This script expects to find a file called
# 'problems.csv' in the current directory which should be generated
# by:
# DIR=`pwd` rake data:create_problem_spreadsheet
import csv
import datetime
from collections import defaultdict
import matplotlib.pyplot as plt
import itertools
transport_types = 'Bus', 'Train', 'Tram', 'Ferry'
counts = {}
for transport_type in transport_types:
counts[transport_type] = defaultdict(int)
today = datetime.date.today()
latest_month = earliest_month = (today.year, today.month)
maximum_count = -1
with open('problems.csv') as fp:
reader = csv.DictReader(fp, delimiter=',', quotechar='"')
for row in reader:
d = datetime.datetime.strptime(row['Created'],
'%H:%M %d %b %Y')
ym = (d.year, d.month)
earliest_month = min(earliest_month, ym)
transport_modes = row['Transport mode']
for transport_type in transport_types:
if transport_type in transport_modes:
counts[transport_type][ym] += 1
maximum_count = max(maximum_count, counts[transport_type][ym])
def months_between(earlier, later):
"""A generator for iterating over months represented as (year, month) tuples"""
year = earlier[0]
month = earlier[1]
while True:
yield (year, month)
if month == 12:
year = year + 1
month = 1
else:
month += 1
if (year, month) > later:
return
all_months = list(months_between(earliest_month, latest_month))
# Don't include the most recent month, since the data won't be
# complete:
all_months = all_months[0:-1]
months = len(all_months)
# Make sure that there's at least a zero count for each month we're
# considering:
for d in counts.values():
for ym in all_months:
d[ym] += 0
for transport_type in transport_types:
fig = plt.figure()
d = counts[transport_type]
x = all_months
y = [d[ym] for ym in x]
x_labels = ["%d-%d" % ym for ym in x]
plt.bar(range(months), y)
plt.xticks(range(months), x_labels, size='small', rotation=60)
plt.xlim(0, months)
plt.ylim(0, maximum_count)
plt.title(transport_type + ' issue report counts per month on FixMyTransport')
plt.ylabel('Number of problems or campaigns')
plt.savefig(transport_type.lower() + ".png", dpi=100)
|
Add a script to graph problem reports over time by transport mode
|
Add a script to graph problem reports over time by transport mode
|
Python
|
agpl-3.0
|
mysociety/fixmytransport,mysociety/fixmytransport,mysociety/fixmytransport,mysociety/fixmytransport,mysociety/fixmytransport,mysociety/fixmytransport
|
Add a script to graph problem reports over time by transport mode
|
#!/usr/bin/python
# A script to draw graphs showing the number of reports by transport
# type each month. This script expects to find a file called
# 'problems.csv' in the current directory which should be generated
# by:
# DIR=`pwd` rake data:create_problem_spreadsheet
import csv
import datetime
from collections import defaultdict
import matplotlib.pyplot as plt
import itertools
transport_types = 'Bus', 'Train', 'Tram', 'Ferry'
counts = {}
for transport_type in transport_types:
counts[transport_type] = defaultdict(int)
today = datetime.date.today()
latest_month = earliest_month = (today.year, today.month)
maximum_count = -1
with open('problems.csv') as fp:
reader = csv.DictReader(fp, delimiter=',', quotechar='"')
for row in reader:
d = datetime.datetime.strptime(row['Created'],
'%H:%M %d %b %Y')
ym = (d.year, d.month)
earliest_month = min(earliest_month, ym)
transport_modes = row['Transport mode']
for transport_type in transport_types:
if transport_type in transport_modes:
counts[transport_type][ym] += 1
maximum_count = max(maximum_count, counts[transport_type][ym])
def months_between(earlier, later):
"""A generator for iterating over months represented as (year, month) tuples"""
year = earlier[0]
month = earlier[1]
while True:
yield (year, month)
if month == 12:
year = year + 1
month = 1
else:
month += 1
if (year, month) > later:
return
all_months = list(months_between(earliest_month, latest_month))
# Don't include the most recent month, since the data won't be
# complete:
all_months = all_months[0:-1]
months = len(all_months)
# Make sure that there's at least a zero count for each month we're
# considering:
for d in counts.values():
for ym in all_months:
d[ym] += 0
for transport_type in transport_types:
fig = plt.figure()
d = counts[transport_type]
x = all_months
y = [d[ym] for ym in x]
x_labels = ["%d-%d" % ym for ym in x]
plt.bar(range(months), y)
plt.xticks(range(months), x_labels, size='small', rotation=60)
plt.xlim(0, months)
plt.ylim(0, maximum_count)
plt.title(transport_type + ' issue report counts per month on FixMyTransport')
plt.ylabel('Number of problems or campaigns')
plt.savefig(transport_type.lower() + ".png", dpi=100)
|
<commit_before><commit_msg>Add a script to graph problem reports over time by transport mode<commit_after>
|
#!/usr/bin/python
# A script to draw graphs showing the number of reports by transport
# type each month. This script expects to find a file called
# 'problems.csv' in the current directory which should be generated
# by:
# DIR=`pwd` rake data:create_problem_spreadsheet
import csv
import datetime
from collections import defaultdict
import matplotlib.pyplot as plt
import itertools
transport_types = 'Bus', 'Train', 'Tram', 'Ferry'
counts = {}
for transport_type in transport_types:
counts[transport_type] = defaultdict(int)
today = datetime.date.today()
latest_month = earliest_month = (today.year, today.month)
maximum_count = -1
with open('problems.csv') as fp:
reader = csv.DictReader(fp, delimiter=',', quotechar='"')
for row in reader:
d = datetime.datetime.strptime(row['Created'],
'%H:%M %d %b %Y')
ym = (d.year, d.month)
earliest_month = min(earliest_month, ym)
transport_modes = row['Transport mode']
for transport_type in transport_types:
if transport_type in transport_modes:
counts[transport_type][ym] += 1
maximum_count = max(maximum_count, counts[transport_type][ym])
def months_between(earlier, later):
"""A generator for iterating over months represented as (year, month) tuples"""
year = earlier[0]
month = earlier[1]
while True:
yield (year, month)
if month == 12:
year = year + 1
month = 1
else:
month += 1
if (year, month) > later:
return
all_months = list(months_between(earliest_month, latest_month))
# Don't include the most recent month, since the data won't be
# complete:
all_months = all_months[0:-1]
months = len(all_months)
# Make sure that there's at least a zero count for each month we're
# considering:
for d in counts.values():
for ym in all_months:
d[ym] += 0
for transport_type in transport_types:
fig = plt.figure()
d = counts[transport_type]
x = all_months
y = [d[ym] for ym in x]
x_labels = ["%d-%d" % ym for ym in x]
plt.bar(range(months), y)
plt.xticks(range(months), x_labels, size='small', rotation=60)
plt.xlim(0, months)
plt.ylim(0, maximum_count)
plt.title(transport_type + ' issue report counts per month on FixMyTransport')
plt.ylabel('Number of problems or campaigns')
plt.savefig(transport_type.lower() + ".png", dpi=100)
|
Add a script to graph problem reports over time by transport mode#!/usr/bin/python
# A script to draw graphs showing the number of reports by transport
# type each month. This script expects to find a file called
# 'problems.csv' in the current directory which should be generated
# by:
# DIR=`pwd` rake data:create_problem_spreadsheet
import csv
import datetime
from collections import defaultdict
import matplotlib.pyplot as plt
import itertools
transport_types = 'Bus', 'Train', 'Tram', 'Ferry'
counts = {}
for transport_type in transport_types:
counts[transport_type] = defaultdict(int)
today = datetime.date.today()
latest_month = earliest_month = (today.year, today.month)
maximum_count = -1
with open('problems.csv') as fp:
reader = csv.DictReader(fp, delimiter=',', quotechar='"')
for row in reader:
d = datetime.datetime.strptime(row['Created'],
'%H:%M %d %b %Y')
ym = (d.year, d.month)
earliest_month = min(earliest_month, ym)
transport_modes = row['Transport mode']
for transport_type in transport_types:
if transport_type in transport_modes:
counts[transport_type][ym] += 1
maximum_count = max(maximum_count, counts[transport_type][ym])
def months_between(earlier, later):
"""A generator for iterating over months represented as (year, month) tuples"""
year = earlier[0]
month = earlier[1]
while True:
yield (year, month)
if month == 12:
year = year + 1
month = 1
else:
month += 1
if (year, month) > later:
return
all_months = list(months_between(earliest_month, latest_month))
# Don't include the most recent month, since the data won't be
# complete:
all_months = all_months[0:-1]
months = len(all_months)
# Make sure that there's at least a zero count for each month we're
# considering:
for d in counts.values():
for ym in all_months:
d[ym] += 0
for transport_type in transport_types:
fig = plt.figure()
d = counts[transport_type]
x = all_months
y = [d[ym] for ym in x]
x_labels = ["%d-%d" % ym for ym in x]
plt.bar(range(months), y)
plt.xticks(range(months), x_labels, size='small', rotation=60)
plt.xlim(0, months)
plt.ylim(0, maximum_count)
plt.title(transport_type + ' issue report counts per month on FixMyTransport')
plt.ylabel('Number of problems or campaigns')
plt.savefig(transport_type.lower() + ".png", dpi=100)
|
<commit_before><commit_msg>Add a script to graph problem reports over time by transport mode<commit_after>#!/usr/bin/python
# A script to draw graphs showing the number of reports by transport
# type each month. This script expects to find a file called
# 'problems.csv' in the current directory which should be generated
# by:
# DIR=`pwd` rake data:create_problem_spreadsheet
import csv
import datetime
from collections import defaultdict
import matplotlib.pyplot as plt
import itertools
transport_types = 'Bus', 'Train', 'Tram', 'Ferry'
counts = {}
for transport_type in transport_types:
counts[transport_type] = defaultdict(int)
today = datetime.date.today()
latest_month = earliest_month = (today.year, today.month)
maximum_count = -1
with open('problems.csv') as fp:
reader = csv.DictReader(fp, delimiter=',', quotechar='"')
for row in reader:
d = datetime.datetime.strptime(row['Created'],
'%H:%M %d %b %Y')
ym = (d.year, d.month)
earliest_month = min(earliest_month, ym)
transport_modes = row['Transport mode']
for transport_type in transport_types:
if transport_type in transport_modes:
counts[transport_type][ym] += 1
maximum_count = max(maximum_count, counts[transport_type][ym])
def months_between(earlier, later):
"""A generator for iterating over months represented as (year, month) tuples"""
year = earlier[0]
month = earlier[1]
while True:
yield (year, month)
if month == 12:
year = year + 1
month = 1
else:
month += 1
if (year, month) > later:
return
all_months = list(months_between(earliest_month, latest_month))
# Don't include the most recent month, since the data won't be
# complete:
all_months = all_months[0:-1]
months = len(all_months)
# Make sure that there's at least a zero count for each month we're
# considering:
for d in counts.values():
for ym in all_months:
d[ym] += 0
for transport_type in transport_types:
fig = plt.figure()
d = counts[transport_type]
x = all_months
y = [d[ym] for ym in x]
x_labels = ["%d-%d" % ym for ym in x]
plt.bar(range(months), y)
plt.xticks(range(months), x_labels, size='small', rotation=60)
plt.xlim(0, months)
plt.ylim(0, maximum_count)
plt.title(transport_type + ' issue report counts per month on FixMyTransport')
plt.ylabel('Number of problems or campaigns')
plt.savefig(transport_type.lower() + ".png", dpi=100)
|
|
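Editor's note: the months_between generator in the script above is its most subtle piece. As a hedged, self-contained sketch (not part of the commit), here is the same month-iteration idea with a quick sanity check; the name iter_months is hypothetical.
def iter_months(start, end):
    """Yield (year, month) tuples from start to end, inclusive."""
    year, month = start
    while (year, month) <= end:
        yield (year, month)
        month += 1
        if month == 13:
            year, month = year + 1, 1
# Quick check of the boundary behaviour across a year roll-over:
assert list(iter_months((2011, 11), (2012, 2))) == [(2011, 11), (2011, 12), (2012, 1), (2012, 2)]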
b75de39ae75b3780988673ffbab869dec20c1521
|
serverconfig/toolkit_uwsgi_star_shadow.py
|
serverconfig/toolkit_uwsgi_star_shadow.py
|
# mysite_uwsgi.ini file
# http://uwsgi-docs.readthedocs.io/en/latest/tutorials/Django_and_nginx.html
[uwsgi]
# Django-related settings
# the base directory (full path)
chdir = /home/users/starandshadow/star_site
# Django's wsgi file
module = wsgi
# the virtualenv (full path)
home = /home/users/starandshadow/star_site/venv
env = DJANGO_SETTINGS_MODULE=toolkit.settings
# process-related settings
# master
master = true
# maximum number of worker processes
processes = 2
# the socket (use the full path to be safe)
socket = /tmp/starandshadow_django.sock
# TODO work out permission to run in /var/run
#socket = /var/run/ed_django.sock
# Socket permissions. Was 664, then 666. 660 works now
chmod-socket = 660
# clear environment on exit
vacuum = true
uid = www-data
gid = www-data
enable-threads = true
# http://uwsgi-docs.readthedocs.io/en/latest/articles/SerializingAccept.html
thunder-lock = true
#harakiri = 20 # respawn processes taking more than 20 seconds
# limit the project address space to 512 MB
limit-as = 512
# Enable stats
stats = 127.0.0.1:9194
safe-pidfile = /tmp/star_shadow_django.pid
daemonize = /var/log/starandshadow/star_django.log
|
Add uwsgi conf file for star and shadow
|
Add uwsgi conf file for star and shadow
|
Python
|
agpl-3.0
|
BenMotz/cubetoolkit,BenMotz/cubetoolkit,BenMotz/cubetoolkit,BenMotz/cubetoolkit
|
Add uwsgi conf file for star and shadow
|
# mysite_uwsgi.ini file
# http://uwsgi-docs.readthedocs.io/en/latest/tutorials/Django_and_nginx.html
[uwsgi]
# Django-related settings
# the base directory (full path)
chdir = /home/users/starandshadow/star_site
# Django's wsgi file
module = wsgi
# the virtualenv (full path)
home = /home/users/starandshadow/star_site/venv
env = DJANGO_SETTINGS_MODULE=toolkit.settings
# process-related settings
# master
master = true
# maximum number of worker processes
processes = 2
# the socket (use the full path to be safe)
socket = /tmp/starandshadow_django.sock
# TODO work out permission to run in /var/run
#socket = /var/run/ed_django.sock
# Socket permissions. Was 664, then 666. 660 works now
chmod-socket = 660
# clear environment on exit
vacuum = true
uid = www-data
gid = www-data
enable-threads = true
# http://uwsgi-docs.readthedocs.io/en/latest/articles/SerializingAccept.html
thunder-lock = true
#harakiri = 20 # respawn processes taking more than 20 seconds
# limit the project address space to 512 MB
limit-as = 512
# Enable stats
stats = 127.0.0.1:9194
safe-pidfile = /tmp/star_shadow_django.pid
daemonize = /var/log/starandshadow/star_django.log
|
<commit_before><commit_msg>Add uwsgi conf file for star and shadow<commit_after>
|
# mysite_uwsgi.ini file
# http://uwsgi-docs.readthedocs.io/en/latest/tutorials/Django_and_nginx.html
[uwsgi]
# Django-related settings
# the base directory (full path)
chdir = /home/users/starandshadow/star_site
# Django's wsgi file
module = wsgi
# the virtualenv (full path)
home = /home/users/starandshadow/star_site/venv
env = DJANGO_SETTINGS_MODULE=toolkit.settings
# process-related settings
# master
master = true
# maximum number of worker processes
processes = 2
# the socket (use the full path to be safe)
socket = /tmp/starandshadow_django.sock
# TODO work out permission to run in /var/run
#socket = /var/run/ed_django.sock
# Socket permissions. Was 664, then 666. 660 works now
chmod-socket = 660
# clear environment on exit
vacuum = true
uid = www-data
gid = www-data
enable-threads = true
# http://uwsgi-docs.readthedocs.io/en/latest/articles/SerializingAccept.html
thunder-lock = true
#harakiri = 20 # respawn processes taking more than 20 seconds
# limit the project address space to 512 MB
limit-as = 512
# Enable stats
stats = 127.0.0.1:9194
safe-pidfile = /tmp/star_shadow_django.pid
daemonize = /var/log/starandshadow/star_django.log
|
Add uwsgi conf file for star and shadow# mysite_uwsgi.ini file
# http://uwsgi-docs.readthedocs.io/en/latest/tutorials/Django_and_nginx.html
[uwsgi]
# Django-related settings
# the base directory (full path)
chdir = /home/users/starandshadow/star_site
# Django's wsgi file
module = wsgi
# the virtualenv (full path)
home = /home/users/starandshadow/star_site/venv
env = DJANGO_SETTINGS_MODULE=toolkit.settings
# process-related settings
# master
master = true
# maximum number of worker processes
processes = 2
# the socket (use the full path to be safe)
socket = /tmp/starandshadow_django.sock
# TODO work out permission to run in /var/run
#socket = /var/run/ed_django.sock
# Socket permissions. Was 664, then 666. 660 works now
chmod-socket = 660
# clear environment on exit
vacuum = true
uid = www-data
gid = www-data
enable-threads = true
# http://uwsgi-docs.readthedocs.io/en/latest/articles/SerializingAccept.html
thunder-lock = true
#harakiri = 20 # respawn processes taking more than 20 seconds
# limit the project address space to 512 MB
limit-as = 512
# Enable stats
stats = 127.0.0.1:9194
safe-pidfile = /tmp/star_shadow_django.pid
daemonize = /var/log/starandshadow/star_django.log
|
<commit_before><commit_msg>Add uwsgi conf file for star and shadow<commit_after># mysite_uwsgi.ini file
# http://uwsgi-docs.readthedocs.io/en/latest/tutorials/Django_and_nginx.html
[uwsgi]
# Django-related settings
# the base directory (full path)
chdir = /home/users/starandshadow/star_site
# Django's wsgi file
module = wsgi
# the virtualenv (full path)
home = /home/users/starandshadow/star_site/venv
env = DJANGO_SETTINGS_MODULE=toolkit.settings
# process-related settings
# master
master = true
# maximum number of worker processes
processes = 2
# the socket (use the full path to be safe)
socket = /tmp/starandshadow_django.sock
# TODO work out permission to run in /var/run
#socket = /var/run/ed_django.sock
# Socket permissions. Was 664, then 666. 660 works now
chmod-socket = 660
# clear environment on exit
vacuum = true
uid = www-data
gid = www-data
enable-threads = true
# http://uwsgi-docs.readthedocs.io/en/latest/articles/SerializingAccept.html
thunder-lock = true
#harakiri = 20 # respawn processes taking more than 20 seconds
# limit the project address space to 512 MB
limit-as = 512
# Enable stats
stats = 127.0.0.1:9194
safe-pidfile = /tmp/star_shadow_django.pid
daemonize = /var/log/starandshadow/star_django.log
|
|
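Editor's note: the profile above turns on the uwsgi stats server (stats = 127.0.0.1:9194). As a hedged illustration, not part of the commit, here is a small Python sketch that connects to that port and reads the JSON document uwsgi pushes to each client; it assumes the stats server is actually running and reachable.
import json
import socket
def read_uwsgi_stats(host="127.0.0.1", port=9194):
    # The uwsgi stats server writes one JSON blob and then closes the connection.
    chunks = []
    with socket.create_connection((host, port), timeout=5) as conn:
        while True:
            data = conn.recv(4096)
            if not data:
                break
            chunks.append(data)
    return json.loads(b"".join(chunks).decode("utf-8"))
if __name__ == "__main__":
    stats = read_uwsgi_stats()
    print("{} workers reported".format(len(stats.get("workers", []))))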
4bfc3f650bd5560f2e2e469252ea1166496a4b6b
|
example1.py
|
example1.py
|
from __future__ import print_function
from datacube.api.model import DatasetType, Satellite, Ls57Arg25Bands, Fc25Bands, Pq25Bands
from datacube.api.query import list_tiles_as_list
from datacube.api.utils import get_dataset_metadata
from datacube.api.utils import get_dataset_data
from geotiff_to_netcdf import BandAsDimensionNetCDF, SeparateBandsTimeSlicedNetCDF
from datetime import date
import sys
# Define a DatasetType mapping
DS_TYPES_MAP = {'arg25': DatasetType.ARG25,
'fc25': DatasetType.FC25,
'pq25': DatasetType.PQ25}
cell_x = 146
cell_y = -34
min_date = date(2014, 1, 1)
max_date = date(2014, 12, 31)
satellites = 'LS5,LS7'
satellites = [Satellite(i) for i in satellites.split(',')]
dataset_types = 'ARG25,FC25,PQ25'
dataset_types = [i.lower() for i in dataset_types.split(',')]
dataset_types = [DS_TYPES_MAP[i] for i in dataset_types]
tiles = list_tiles_as_list(x=[cell_x], y=[cell_y], acq_min=min_date,
acq_max=max_date, dataset_types=dataset_types,
satellites=satellites)
netcdf_filename = 'multi_band.nc'
arg25paths = [dstile.path for tile in tiles for dstile in tile.datasets.itervalues() if dstile.bands == Ls57Arg25Bands]
initial_file = [path for path in arg25paths if path.endswith("tif")][0]
arg25paths.remove(initial_file)
print("Creating {}".format(netcdf_filename))
multi_band_file = SeparateBandsTimeSlicedNetCDF(netcdf_filename, mode='w')
print("Creating netcdf structure from {}".format(initial_file))
multi_band_file.create_from_geotiff(initial_file)
print("Appending from {}".format(arg25paths[1]))
multi_band_file.append_geotiff(arg25paths[1])
multi_band_file.close()
# for path in arg25paths:
# multi_band_file
|
Add dodgy NetCDF creation example
|
Add dodgy NetCDF creation example
|
Python
|
bsd-3-clause
|
omad/datacube-experiments
|
Add dodgy NetCDF creation example
|
from __future__ import print_function
from datacube.api.model import DatasetType, Satellite, Ls57Arg25Bands, Fc25Bands, Pq25Bands
from datacube.api.query import list_tiles_as_list
from datacube.api.utils import get_dataset_metadata
from datacube.api.utils import get_dataset_data
from geotiff_to_netcdf import BandAsDimensionNetCDF, SeparateBandsTimeSlicedNetCDF
from datetime import date
import sys
# Define a DatasetType mapping
DS_TYPES_MAP = {'arg25': DatasetType.ARG25,
'fc25': DatasetType.FC25,
'pq25': DatasetType.PQ25}
cell_x = 146
cell_y = -34
min_date = date(2014, 1, 1)
max_date = date(2014, 12, 31)
satellites = 'LS5,LS7'
satellites = [Satellite(i) for i in satellites.split(',')]
dataset_types = 'ARG25,FC25,PQ25'
dataset_types = [i.lower() for i in dataset_types.split(',')]
dataset_types = [DS_TYPES_MAP[i] for i in dataset_types]
tiles = list_tiles_as_list(x=[cell_x], y=[cell_y], acq_min=min_date,
acq_max=max_date, dataset_types=dataset_types,
satellites=satellites)
netcdf_filename = 'multi_band.nc'
arg25paths = [dstile.path for tile in tiles for dstile in tile.datasets.itervalues() if dstile.bands == Ls57Arg25Bands]
initial_file = [path for path in arg25paths if path.endswith("tif")][0]
arg25paths.remove(initial_file)
print("Creating {}".format(netcdf_filename))
multi_band_file = SeparateBandsTimeSlicedNetCDF(netcdf_filename, mode='w')
print("Creating netcdf structure from {}".format(initial_file))
multi_band_file.create_from_geotiff(initial_file)
print("Appending from {}".format(arg25paths[1]))
multi_band_file.append_geotiff(arg25paths[1])
multi_band_file.close()
# for path in arg25paths:
# multi_band_file
|
<commit_before><commit_msg>Add dodgy NetCDF creation example<commit_after>
|
from __future__ import print_function
from datacube.api.model import DatasetType, Satellite, Ls57Arg25Bands, Fc25Bands, Pq25Bands
from datacube.api.query import list_tiles_as_list
from datacube.api.utils import get_dataset_metadata
from datacube.api.utils import get_dataset_data
from geotiff_to_netcdf import BandAsDimensionNetCDF, SeparateBandsTimeSlicedNetCDF
from datetime import date
import sys
# Define a DatasetType mapping
DS_TYPES_MAP = {'arg25': DatasetType.ARG25,
'fc25': DatasetType.FC25,
'pq25': DatasetType.PQ25}
cell_x = 146
cell_y = -34
min_date = date(2014, 1, 1)
max_date = date(2014, 12, 31)
satellites = 'LS5,LS7'
satellites = [Satellite(i) for i in satellites.split(',')]
dataset_types = 'ARG25,FC25,PQ25'
dataset_types = [i.lower() for i in dataset_types.split(',')]
dataset_types = [DS_TYPES_MAP[i] for i in dataset_types]
tiles = list_tiles_as_list(x=[cell_x], y=[cell_y], acq_min=min_date,
acq_max=max_date, dataset_types=dataset_types,
satellites=satellites)
netcdf_filename = 'multi_band.nc'
arg25paths = [dstile.path for tile in tiles for dstile in tile.datasets.itervalues() if dstile.bands == Ls57Arg25Bands]
initial_file = [path for path in arg25paths if path.endswith("tif")][0]
arg25paths.remove(initial_file)
print("Creating {}".format(netcdf_filename))
multi_band_file = SeparateBandsTimeSlicedNetCDF(netcdf_filename, mode='w')
print("Creating netcdf structure from {}".format(initial_file))
multi_band_file.create_from_geotiff(initial_file)
print("Appending from {}".format(arg25paths[1]))
multi_band_file.append_geotiff(arg25paths[1])
multi_band_file.close()
# for path in arg25paths:
# multi_band_file
|
Add dodgy NetCDF creation examplefrom __future__ import print_function
from datacube.api.model import DatasetType, Satellite, Ls57Arg25Bands, Fc25Bands, Pq25Bands
from datacube.api.query import list_tiles_as_list
from datacube.api.utils import get_dataset_metadata
from datacube.api.utils import get_dataset_data
from geotiff_to_netcdf import BandAsDimensionNetCDF, SeparateBandsTimeSlicedNetCDF
from datetime import date
import sys
# Define a DatasetType mapping
DS_TYPES_MAP = {'arg25': DatasetType.ARG25,
'fc25': DatasetType.FC25,
'pq25': DatasetType.PQ25}
cell_x = 146
cell_y = -34
min_date = date(2014, 1, 1)
max_date = date(2014, 12, 31)
satellites = 'LS5,LS7'
satellites = [Satellite(i) for i in satellites.split(',')]
dataset_types = 'ARG25,FC25,PQ25'
dataset_types = [i.lower() for i in dataset_types.split(',')]
dataset_types = [DS_TYPES_MAP[i] for i in dataset_types]
tiles = list_tiles_as_list(x=[cell_x], y=[cell_y], acq_min=min_date,
acq_max=max_date, dataset_types=dataset_types,
satellites=satellites)
netcdf_filename = 'multi_band.nc'
arg25paths = [dstile.path for tile in tiles for dstile in tile.datasets.itervalues() if dstile.bands == Ls57Arg25Bands]
initial_file = [path for path in arg25paths if path.endswith("tif")][0]
arg25paths.remove(initial_file)
print("Creating {}".format(netcdf_filename))
multi_band_file = SeparateBandsTimeSlicedNetCDF(netcdf_filename, mode='w')
print("Creating netcdf structure from {}".format(initial_file))
multi_band_file.create_from_geotiff(initial_file)
print("Appending from {}".format(arg25paths[1]))
multi_band_file.append_geotiff(arg25paths[1])
multi_band_file.close()
# for path in arg25paths:
# multi_band_file
|
<commit_before><commit_msg>Add dodgy NetCDF creation example<commit_after>from __future__ import print_function
from datacube.api.model import DatasetType, Satellite, Ls57Arg25Bands, Fc25Bands, Pq25Bands
from datacube.api.query import list_tiles_as_list
from datacube.api.utils import get_dataset_metadata
from datacube.api.utils import get_dataset_data
from geotiff_to_netcdf import BandAsDimensionNetCDF, SeparateBandsTimeSlicedNetCDF
from datetime import date
import sys
# Define a DatasetType mapping
DS_TYPES_MAP = {'arg25': DatasetType.ARG25,
'fc25': DatasetType.FC25,
'pq25': DatasetType.PQ25}
cell_x = 146
cell_y = -34
min_date = date(2014, 1, 1)
max_date = date(2014, 12, 31)
satellites = 'LS5,LS7'
satellites = [Satellite(i) for i in satellites.split(',')]
dataset_types = 'ARG25,FC25,PQ25'
dataset_types = [i.lower() for i in dataset_types.split(',')]
dataset_types = [DS_TYPES_MAP[i] for i in dataset_types]
tiles = list_tiles_as_list(x=[cell_x], y=[cell_y], acq_min=min_date,
acq_max=max_date, dataset_types=dataset_types,
satellites=satellites)
netcdf_filename = 'multi_band.nc'
arg25paths = [dstile.path for tile in tiles for dstile in tile.datasets.itervalues() if dstile.bands == Ls57Arg25Bands]
initial_file = [path for path in arg25paths if path.endswith("tif")][0]
arg25paths.remove(initial_file)
print("Creating {}".format(netcdf_filename))
multi_band_file = SeparateBandsTimeSlicedNetCDF(netcdf_filename, mode='w')
print("Creating netcdf structure from {}".format(initial_file))
multi_band_file.create_from_geotiff(initial_file)
print("Appending from {}".format(arg25paths[1]))
multi_band_file.append_geotiff(arg25paths[1])
multi_band_file.close()
# for path in arg25paths:
# multi_band_file
|
|
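Editor's note: the commit above appends just one GeoTIFF and leaves the final loop commented out. A hedged sketch, not part of the commit, of what that loop might look like, assuming only the create_from_geotiff / append_geotiff / close API already used above:
def append_all(netcdf_file, geotiff_paths):
    # Append every remaining GeoTIFF as a further time slice, then close the file.
    for path in geotiff_paths:
        print("Appending from {}".format(path))
        netcdf_file.append_geotiff(path)
    netcdf_file.close()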
bbf28b1c7fa3fb9f9074b9d4879c30e810ab3f31
|
ktbs_bench/utils/bench_manager.py
|
ktbs_bench/utils/bench_manager.py
|
from contextlib import contextmanager
from ktbs_bench.utils.decorators import bench as util_bench
class BenchManager:
def __init__(self):
self._contexts = []
self._bench_funcs = []
def bench(self, func):
"""Prepare a function to be benched and add it to the list to be run later."""
func = util_bench(func)
self._bench_funcs.append(func)
def context(self, func):
"""Decorate a function to act as a context."""
func = contextmanager(func)
self._contexts.append(func)
def run(self, output_file):
"""Execute each collected function against each context."""
pass
|
Add premise of Bench Manager
|
Add premise of Bench Manager
|
Python
|
mit
|
ktbs/ktbs-bench,ktbs/ktbs-bench
|
Add premise of Bench Manager
|
from contextlib import contextmanager
from ktbs_bench.utils.decorators import bench as util_bench
class BenchManager:
def __init__(self):
self._contexts = []
self._bench_funcs = []
def bench(self, func):
"""Prepare a function to be benched and add it to the list to be run later."""
func = util_bench(func)
self._bench_funcs.append(func)
def context(self, func):
"""Decorate a function to act as a context."""
func = contextmanager(func)
self._contexts.append(func)
def run(self, output_file):
"""Execute each collected function against each context."""
pass
|
<commit_before><commit_msg>Add premise of Bench Manager<commit_after>
|
from contextlib import contextmanager
from ktbs_bench.utils.decorators import bench as util_bench
class BenchManager:
def __init__(self):
self._contexts = []
self._bench_funcs = []
def bench(self, func):
"""Prepare a function to be benched and add it to the list to be run later."""
func = util_bench(func)
self._bench_funcs.append(func)
def context(self, func):
"""Decorate a function to act as a context."""
func = contextmanager(func)
self._contexts.append(func)
def run(self, output_file):
"""Execute each collected function against each context."""
pass
|
Add premise of Bench Managerfrom contextlib import contextmanager
from ktbs_bench.utils.decorators import bench as util_bench
class BenchManager:
def __init__(self):
self._contexts = []
self._bench_funcs = []
def bench(self, func):
"""Prepare a function to be benched and add it to the list to be run later."""
func = util_bench(func)
self._bench_funcs.append(func)
def context(self, func):
"""Decorate a function to act as a context."""
func = contextmanager(func)
self._contexts.append(func)
def run(self, output_file):
"""Execute each collected function against each context."""
pass
|
<commit_before><commit_msg>Add premise of Bench Manager<commit_after>from contextlib import contextmanager
from ktbs_bench.utils.decorators import bench as util_bench
class BenchManager:
def __init__(self):
self._contexts = []
self._bench_funcs = []
def bench(self, func):
"""Prepare a function to be benched and add it to the list to be run later."""
func = util_bench(func)
self._bench_funcs.append(func)
def context(self, func):
"""Decorate a function to act as a context."""
func = contextmanager(func)
self._contexts.append(func)
def run(self, output_file):
"""Execute each collected function against each context."""
pass
|
|
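Editor's note: run() is deliberately left as a stub in the commit above. Below is a hedged, self-contained sketch of how the collected contexts and bench functions might eventually be combined; the CSV layout and the assumption that each bench function receives the value yielded by the context are illustrative guesses, not the project's design.
import csv
from contextlib import contextmanager
class SketchBenchManager(object):
    def __init__(self):
        self._contexts = []
        self._bench_funcs = []
    def bench(self, func):
        self._bench_funcs.append(func)
        return func
    def context(self, func):
        func = contextmanager(func)
        self._contexts.append(func)
        return func
    def run(self, output_file):
        # Run every registered bench function inside every registered context,
        # writing one CSV row per (context, function) pair.
        with open(output_file, 'w') as out:
            writer = csv.writer(out)
            writer.writerow(['context', 'function', 'result'])
            for ctx in self._contexts:
                with ctx() as env:
                    for func in self._bench_funcs:
                        writer.writerow([ctx.__name__, func.__name__, func(env)])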
cbb182ff0e999954c7a5c8fd19097a441762666b
|
salt/sdb/etcd_db.py
|
salt/sdb/etcd_db.py
|
# -*- coding: utf-8 -*-
'''
etcd Database Module
:maintainer: SaltStack
:maturity: New
:depends: python-etcd
:platform: all
This module allows access to the etcd database using an ``sdb://`` URI. This
package is located at ``https://pypi.python.org/pypi/python-etcd``.
Like all sdb modules, the etcd module requires a configuration profile to
be configured in either the minion or master configuration file. This profile
requires very little. In the example:
.. code-block:: yaml
myetcd:
driver: etcd
etcd.host: 127.0.0.1
etcd.port: 4001
The ``driver`` refers to the etcd module, ``etcd.host`` refers to the host that
is hosting the etcd database and ``etcd.port`` refers to the port on that host.
.. code-block:: yaml
password: sdb://etcd/mypassword
.. versionadded:: 2014.1.4 (Hydrogen)
'''
# import python libs
import logging
try:
import salt.utils.etcd_util
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
log = logging.getLogger(__name__)
__func_alias__ = {
'set_': 'set'
}
__virtualname__ = 'etcd'
def __virtual__():
'''
Only load the module if python-etcd is installed
'''
if HAS_LIBS:
return __virtualname__
return False
def set_(key, value, service=None, profile=None): # pylint: disable=W0613
'''
Set a key/value pair in the etcd service
'''
client = _get_conn(profile)
client.set(key, value)
return get(key, service, profile)
def get(key, service=None, profile=None): # pylint: disable=W0613
'''
Get a value from the etcd service
'''
client = _get_conn(profile)
result = client.get(key)
return result.value
def _get_conn(profile):
'''
Get a connection
'''
return salt.utils.etcd_util.get_conn(profile)
|
Add sdb driver for etcd
|
Add sdb driver for etcd
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add sdb driver for etcd
|
# -*- coding: utf-8 -*-
'''
etcd Database Module
:maintainer: SaltStack
:maturity: New
:depends: python-etcd
:platform: all
This module allows access to the etcd database using an ``sdb://`` URI. This
package is located at ``https://pypi.python.org/pypi/python-etcd``.
Like all sdb modules, the etcd module requires a configuration profile to
be configured in either the minion or master configuration file. This profile
requires very little. In the example:
.. code-block:: yaml
myetcd:
driver: etcd
etcd.host: 127.0.0.1
etcd.port: 4001
The ``driver`` refers to the etcd module, ``etcd.host`` refers to the host that
is hosting the etcd database and ``etcd.port`` refers to the port on that host.
.. code-block:: yaml
password: sdb://etcd/mypassword
.. versionadded:: 2014.1.4 (Hydrogen)
'''
# import python libs
import logging
try:
import salt.utils.etcd_util
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
log = logging.getLogger(__name__)
__func_alias__ = {
'set_': 'set'
}
__virtualname__ = 'etcd'
def __virtual__():
'''
Only load the module if python-etcd is installed
'''
if HAS_LIBS:
return __virtualname__
return False
def set_(key, value, service=None, profile=None): # pylint: disable=W0613
'''
Set a key/value pair in the etcd service
'''
client = _get_conn(profile)
client.set(key, value)
return get(key, service, profile)
def get(key, service=None, profile=None): # pylint: disable=W0613
'''
Get a value from the etcd service
'''
client = _get_conn(profile)
result = client.get(key)
return result.value
def _get_conn(profile):
'''
Get a connection
'''
return salt.utils.etcd_util.get_conn(profile)
|
<commit_before><commit_msg>Add sdb driver for etcd<commit_after>
|
# -*- coding: utf-8 -*-
'''
etcd Database Module
:maintainer: SaltStack
:maturity: New
:depends: python-etcd
:platform: all
This module allows access to the etcd database using an ``sdb://`` URI. This
package is located at ``https://pypi.python.org/pypi/python-etcd``.
Like all sdb modules, the etcd module requires a configuration profile to
be configured in either the minion or master configuration file. This profile
requires very little. In the example:
.. code-block:: yaml
myetcd:
driver: etcd
etcd.host: 127.0.0.1
etcd.port: 4001
The ``driver`` refers to the etcd module, ``etcd.host`` refers to the host that
is hosting the etcd database and ``etcd.port`` refers to the port on that host.
.. code-block:: yaml
password: sdb://etcd/mypassword
.. versionadded:: 2014.1.4 (Hydrogen)
'''
# import python libs
import logging
try:
import salt.utils.etcd_util
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
log = logging.getLogger(__name__)
__func_alias__ = {
'set_': 'set'
}
__virtualname__ = 'etcd'
def __virtual__():
'''
Only load the module if python-etcd is installed
'''
if HAS_LIBS:
return __virtualname__
return False
def set_(key, value, service=None, profile=None): # pylint: disable=W0613
'''
Set a key/value pair in the etcd service
'''
client = _get_conn(profile)
client.set(key, value)
return get(key, service, profile)
def get(key, service=None, profile=None): # pylint: disable=W0613
'''
Get a value from the etcd service
'''
client = _get_conn(profile)
result = client.get(key)
return result.value
def _get_conn(profile):
'''
Get a connection
'''
return salt.utils.etcd_util.get_conn(profile)
|
Add sdb driver for etcd# -*- coding: utf-8 -*-
'''
etcd Database Module
:maintainer: SaltStack
:maturity: New
:depends: python-etcd
:platform: all
This module allows access to the etcd database using an ``sdb://`` URI. This
package is located at ``https://pypi.python.org/pypi/python-etcd``.
Like all sdb modules, the etcd module requires a configuration profile to
be configured in either the minion or master configuration file. This profile
requires very little. In the example:
.. code-block:: yaml
myetcd:
driver: etcd
etcd.host: 127.0.0.1
etcd.port: 4001
The ``driver`` refers to the etcd module, ``etcd.host`` refers to the host that
is hosting the etcd database and ``etcd.port`` refers to the port on that host.
.. code-block:: yaml
password: sdb://etcd/mypassword
.. versionadded:: 2014.1.4 (Hydrogen)
'''
# import python libs
import logging
try:
import salt.utils.etcd_util
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
log = logging.getLogger(__name__)
__func_alias__ = {
'set_': 'set'
}
__virtualname__ = 'etcd'
def __virtual__():
'''
Only load the module if python-etcd is installed
'''
if HAS_LIBS:
return __virtualname__
return False
def set_(key, value, service=None, profile=None): # pylint: disable=W0613
'''
Set a key/value pair in the etcd service
'''
client = _get_conn(profile)
client.set(key, value)
return get(key, service, profile)
def get(key, service=None, profile=None): # pylint: disable=W0613
'''
Get a value from the etcd service
'''
client = _get_conn(profile)
result = client.get(key)
return result.value
def _get_conn(profile):
'''
Get a connection
'''
return salt.utils.etcd_util.get_conn(profile)
|
<commit_before><commit_msg>Add sdb driver for etcd<commit_after># -*- coding: utf-8 -*-
'''
etcd Database Module
:maintainer: SaltStack
:maturity: New
:depends: python-etcd
:platform: all
This module allows access to the etcd database using an ``sdb://`` URI. This
package is located at ``https://pypi.python.org/pypi/python-etcd``.
Like all sdb modules, the etcd module requires a configuration profile to
be configured in either the minion or master configuration file. This profile
requires very little. In the example:
.. code-block:: yaml
myetcd:
driver: etcd
etcd.host: 127.0.0.1
etcd.port: 4001
The ``driver`` refers to the etcd module, ``etcd.host`` refers to the host that
is hosting the etcd database and ``etcd.port`` refers to the port on that host.
.. code-block:: yaml
password: sdb://etcd/mypassword
.. versionadded:: 2014.1.4 (Hydrogen)
'''
# import python libs
import logging
try:
import salt.utils.etcd_util
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
log = logging.getLogger(__name__)
__func_alias__ = {
'set_': 'set'
}
__virtualname__ = 'etcd'
def __virtual__():
'''
Only load the module if python-etcd is installed
'''
if HAS_LIBS:
return __virtualname__
return False
def set_(key, value, service=None, profile=None): # pylint: disable=W0613
'''
Set a key/value pair in the etcd service
'''
client = _get_conn(profile)
client.set(key, value)
return get(key, service, profile)
def get(key, service=None, profile=None): # pylint: disable=W0613
'''
Get a value from the etcd service
'''
client = _get_conn(profile)
result = client.get(key)
return result.value
def _get_conn(profile):
'''
Get a connection
'''
return salt.utils.etcd_util.get_conn(profile)
|
|
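Editor's note: a hedged example, not part of the commit, of exercising the module above directly instead of through Salt's sdb:// resolution, using an ad-hoc profile dict shaped like the documented YAML example. It assumes python-etcd is installed, an etcd server is listening on 127.0.0.1:4001, and that the file is importable as salt.sdb.etcd_db.
from salt.sdb import etcd_db
profile = {'etcd.host': '127.0.0.1', 'etcd.port': 4001}
etcd_db.set_('mypassword', 's3cr3t', profile=profile)
print(etcd_db.get('mypassword', profile=profile))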
916c453d2ba939fb7eb15f4d87557c37bfc57a21
|
tests/components/test_shell_command.py
|
tests/components/test_shell_command.py
|
"""
tests.test_shell_command
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the shell_command component.
"""
import os
import tempfile
import unittest
from homeassistant import core
from homeassistant.components import shell_command
class TestShellCommand(unittest.TestCase):
""" Test the demo module. """
def setUp(self): # pylint: disable=invalid-name
self.hass = core.HomeAssistant()
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_executing_service(self):
""" Test if able to call a configured service. """
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'called.txt')
shell_command.setup(self.hass, {
'shell_command': {
'test_service': "touch {}".format(path)
}
})
self.hass.services.call('shell_command', 'test_service',
blocking=True)
self.assertTrue(os.path.isfile(path))
|
Add test for shell command
|
Add test for shell command
|
Python
|
mit
|
Julian/home-assistant,ct-23/home-assistant,mezz64/home-assistant,pottzer/home-assistant,joopert/home-assistant,FreekingDean/home-assistant,eagleamon/home-assistant,betrisey/home-assistant,eagleamon/home-assistant,open-homeautomation/home-assistant,aequitas/home-assistant,open-homeautomation/home-assistant,bdfoster/blumate,nkgilley/home-assistant,leppa/home-assistant,tinloaf/home-assistant,nugget/home-assistant,robjohnson189/home-assistant,devdelay/home-assistant,srcLurker/home-assistant,instantchow/home-assistant,pottzer/home-assistant,robjohnson189/home-assistant,shaftoe/home-assistant,ma314smith/home-assistant,srcLurker/home-assistant,caiuspb/home-assistant,MartinHjelmare/home-assistant,ct-23/home-assistant,Julian/home-assistant,hmronline/home-assistant,balloob/home-assistant,toddeye/home-assistant,mikaelboman/home-assistant,morphis/home-assistant,philipbl/home-assistant,jnewland/home-assistant,adrienbrault/home-assistant,jamespcole/home-assistant,luxus/home-assistant,leoc/home-assistant,tboyce1/home-assistant,florianholzapfel/home-assistant,mezz64/home-assistant,LinuxChristian/home-assistant,Julian/home-assistant,jabesq/home-assistant,robjohnson189/home-assistant,happyleavesaoc/home-assistant,auduny/home-assistant,miniconfig/home-assistant,eagleamon/home-assistant,shaftoe/home-assistant,LinuxChristian/home-assistant,kyvinh/home-assistant,partofthething/home-assistant,oandrew/home-assistant,molobrakos/home-assistant,alexmogavero/home-assistant,devdelay/home-assistant,fbradyirl/home-assistant,Duoxilian/home-assistant,oandrew/home-assistant,tinloaf/home-assistant,jnewland/home-assistant,Danielhiversen/home-assistant,tboyce021/home-assistant,justyns/home-assistant,HydrelioxGitHub/home-assistant,philipbl/home-assistant,stefan-jonasson/home-assistant,soldag/home-assistant,sander76/home-assistant,hmronline/home-assistant,GenericStudent/home-assistant,mikaelboman/home-assistant,DavidLP/home-assistant,JshWright/home-assistant,eagleamon/home-assistant,keerts/home-assistant,Duoxilian/home-assistant,sfam/home-assistant,tboyce021/home-assistant,PetePriority/home-assistant,ewandor/home-assistant,leoc/home-assistant,persandstrom/home-assistant,dmeulen/home-assistant,jaharkes/home-assistant,PetePriority/home-assistant,ma314smith/home-assistant,emilhetty/home-assistant,hexxter/home-assistant,deisi/home-assistant,tboyce1/home-assistant,mKeRix/home-assistant,sffjunkie/home-assistant,ct-23/home-assistant,kennedyshead/home-assistant,pschmitt/home-assistant,keerts/home-assistant,nnic/home-assistant,sdague/home-assistant,dmeulen/home-assistant,MartinHjelmare/home-assistant,emilhetty/home-assistant,miniconfig/home-assistant,emilhetty/home-assistant,Theb-1/home-assistant,jaharkes/home-assistant,aronsky/home-assistant,molobrakos/home-assistant,joopert/home-assistant,hexxter/home-assistant,alexmogavero/home-assistant,MungoRae/home-assistant,Theb-1/home-assistant,rohitranjan1991/home-assistant,balloob/home-assistant,jnewland/home-assistant,badele/home-assistant,Julian/home-assistant,varunr047/homefile,varunr047/homefile,jamespcole/home-assistant,JshWright/home-assistant,MungoRae/home-assistant,philipbl/home-assistant,turbokongen/home-assistant,lukas-hetzenecker/home-assistant,Smart-Torvy/torvy-home-assistant,shaftoe/home-assistant,nnic/home-assistant,LinuxChristian/home-assistant,robjohnson189/home-assistant,MungoRae/home-assistant,xifle/home-assistant,PetePriority/home-assistant,nevercast/home-assistant,toddeye/home-assistant,nugget/home-assistant,badele/home-assistant,leoc/home-assistant,hexxter/home-assistant,va
runr047/homefile,happyleavesaoc/home-assistant,keerts/home-assistant,shaftoe/home-assistant,betrisey/home-assistant,turbokongen/home-assistant,ct-23/home-assistant,coteyr/home-assistant,sander76/home-assistant,sfam/home-assistant,Teagan42/home-assistant,leoc/home-assistant,DavidLP/home-assistant,kennedyshead/home-assistant,miniconfig/home-assistant,happyleavesaoc/home-assistant,titilambert/home-assistant,srcLurker/home-assistant,auduny/home-assistant,emilhetty/home-assistant,kyvinh/home-assistant,florianholzapfel/home-assistant,w1ll1am23/home-assistant,MungoRae/home-assistant,coteyr/home-assistant,nevercast/home-assistant,hmronline/home-assistant,miniconfig/home-assistant,fbradyirl/home-assistant,luxus/home-assistant,ewandor/home-assistant,HydrelioxGitHub/home-assistant,deisi/home-assistant,deisi/home-assistant,jaharkes/home-assistant,coteyr/home-assistant,xifle/home-assistant,alexmogavero/home-assistant,mKeRix/home-assistant,justyns/home-assistant,Zyell/home-assistant,Cinntax/home-assistant,nkgilley/home-assistant,Cinntax/home-assistant,aequitas/home-assistant,ma314smith/home-assistant,rohitranjan1991/home-assistant,mikaelboman/home-assistant,titilambert/home-assistant,sffjunkie/home-assistant,devdelay/home-assistant,aoakeson/home-assistant,mKeRix/home-assistant,pottzer/home-assistant,w1ll1am23/home-assistant,sdague/home-assistant,tboyce1/home-assistant,Duoxilian/home-assistant,jaharkes/home-assistant,robbiet480/home-assistant,dmeulen/home-assistant,happyleavesaoc/home-assistant,betrisey/home-assistant,sffjunkie/home-assistant,caiuspb/home-assistant,nnic/home-assistant,sffjunkie/home-assistant,Teagan42/home-assistant,Zac-HD/home-assistant,tchellomello/home-assistant,bdfoster/blumate,bdfoster/blumate,instantchow/home-assistant,Smart-Torvy/torvy-home-assistant,jawilson/home-assistant,qedi-r/home-assistant,varunr047/homefile,varunr047/homefile,postlund/home-assistant,mikaelboman/home-assistant,dmeulen/home-assistant,tchellomello/home-assistant,soldag/home-assistant,hexxter/home-assistant,LinuxChristian/home-assistant,auduny/home-assistant,Zyell/home-assistant,tinloaf/home-assistant,oandrew/home-assistant,nugget/home-assistant,adrienbrault/home-assistant,HydrelioxGitHub/home-assistant,morphis/home-assistant,tboyce1/home-assistant,instantchow/home-assistant,jawilson/home-assistant,Zac-HD/home-assistant,sfam/home-assistant,caiuspb/home-assistant,home-assistant/home-assistant,srcLurker/home-assistant,stefan-jonasson/home-assistant,Danielhiversen/home-assistant,aoakeson/home-assistant,Smart-Torvy/torvy-home-assistant,xifle/home-assistant,betrisey/home-assistant,balloob/home-assistant,hmronline/home-assistant,hmronline/home-assistant,aequitas/home-assistant,GenericStudent/home-assistant,FreekingDean/home-assistant,nevercast/home-assistant,Theb-1/home-assistant,fbradyirl/home-assistant,luxus/home-assistant,kyvinh/home-assistant,lukas-hetzenecker/home-assistant,persandstrom/home-assistant,ct-23/home-assistant,open-homeautomation/home-assistant,Duoxilian/home-assistant,stefan-jonasson/home-assistant,JshWright/home-assistant,partofthething/home-assistant,morphis/home-assistant,alexmogavero/home-assistant,bdfoster/blumate,xifle/home-assistant,molobrakos/home-assistant,mKeRix/home-assistant,JshWright/home-assistant,florianholzapfel/home-assistant,Zyell/home-assistant,DavidLP/home-assistant,oandrew/home-assistant,Smart-Torvy/torvy-home-assistant,Zac-HD/home-assistant,sffjunkie/home-assistant,aoakeson/home-assistant,justyns/home-assistant,LinuxChristian/home-assistant,emilhetty/home-assistant,keerts/home-a
ssistant,Zac-HD/home-assistant,MungoRae/home-assistant,florianholzapfel/home-assistant,ma314smith/home-assistant,MartinHjelmare/home-assistant,deisi/home-assistant,kyvinh/home-assistant,persandstrom/home-assistant,rohitranjan1991/home-assistant,badele/home-assistant,devdelay/home-assistant,robbiet480/home-assistant,philipbl/home-assistant,aronsky/home-assistant,pschmitt/home-assistant,home-assistant/home-assistant,jabesq/home-assistant,ewandor/home-assistant,bdfoster/blumate,stefan-jonasson/home-assistant,jamespcole/home-assistant,open-homeautomation/home-assistant,leppa/home-assistant,deisi/home-assistant,jabesq/home-assistant,postlund/home-assistant,morphis/home-assistant,qedi-r/home-assistant,mikaelboman/home-assistant
|
Add test for shell command
|
"""
tests.test_shell_command
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the shell_command component.
"""
import os
import tempfile
import unittest
from homeassistant import core
from homeassistant.components import shell_command
class TestShellCommand(unittest.TestCase):
""" Test the demo module. """
def setUp(self): # pylint: disable=invalid-name
self.hass = core.HomeAssistant()
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_executing_service(self):
""" Test if able to call a configured service. """
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'called.txt')
shell_command.setup(self.hass, {
'shell_command': {
'test_service': "touch {}".format(path)
}
})
self.hass.services.call('shell_command', 'test_service',
blocking=True)
self.assertTrue(os.path.isfile(path))
|
<commit_before><commit_msg>Add test for shell command<commit_after>
|
"""
tests.test_shell_command
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the shell_command component.
"""
import os
import tempfile
import unittest
from homeassistant import core
from homeassistant.components import shell_command
class TestShellCommand(unittest.TestCase):
""" Test the demo module. """
def setUp(self): # pylint: disable=invalid-name
self.hass = core.HomeAssistant()
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_executing_service(self):
""" Test if able to call a configured service. """
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'called.txt')
shell_command.setup(self.hass, {
'shell_command': {
'test_service': "touch {}".format(path)
}
})
self.hass.services.call('shell_command', 'test_service',
blocking=True)
self.assertTrue(os.path.isfile(path))
|
Add test for shell command"""
tests.test_shell_command
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the shell_command component.
"""
import os
import tempfile
import unittest
from homeassistant import core
from homeassistant.components import shell_command
class TestShellCommand(unittest.TestCase):
""" Test the demo module. """
def setUp(self): # pylint: disable=invalid-name
self.hass = core.HomeAssistant()
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_executing_service(self):
""" Test if able to call a configured service. """
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'called.txt')
shell_command.setup(self.hass, {
'shell_command': {
'test_service': "touch {}".format(path)
}
})
self.hass.services.call('shell_command', 'test_service',
blocking=True)
self.assertTrue(os.path.isfile(path))
|
<commit_before><commit_msg>Add test for shell command<commit_after>"""
tests.test_shell_command
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the shell_command component.
"""
import os
import tempfile
import unittest
from homeassistant import core
from homeassistant.components import shell_command
class TestShellCommand(unittest.TestCase):
""" Test the demo module. """
def setUp(self): # pylint: disable=invalid-name
self.hass = core.HomeAssistant()
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_executing_service(self):
""" Test if able to call a configured service. """
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'called.txt')
shell_command.setup(self.hass, {
'shell_command': {
'test_service': "touch {}".format(path)
}
})
self.hass.services.call('shell_command', 'test_service',
blocking=True)
self.assertTrue(os.path.isfile(path))
|
|
029de4a3a10f31b2d300e100db7767722698f00a
|
tests/core/parse/test_parse_literal.py
|
tests/core/parse/test_parse_literal.py
|
import unittest
from mygrations.core.parse.rule_literal import rule_literal
class test_parse_literal( unittest.TestCase ):
def get_rule( self, name, literal ):
return rule_literal( False, { 'name': name, 'value': literal }, {} )
def test_name_not_required( self ):
rule = self.get_rule( '', 'ASDF' )
self.assertEquals( rule.name, 'ASDF' )
def test_value_required( self ):
with self.assertRaises( ValueError ):
self.get_rule( 'bob', '' )
def test_can_init_with_name_and_value( self ):
rule = self.get_rule( 'bob', ',' )
self.assertEquals( rule.name, 'bob' )
self.assertEquals( rule.literal, ',' )
def test_match_beginning_only( self ):
rule = self.get_rule( 'bob', ',' )
self.assertFalse( rule.parse( 'hey,' ) )
self.assertEquals( '', rule.result )
def test_leftovers_is_input_for_no_match( self ):
rule = self.get_rule( 'bob', ',' )
string = 'hey,'
rule.parse( string )
self.assertEquals( string, rule.leftovers )
def test_no_leftovers_for_full_match( self ):
string = '23483438'
rule = self.get_rule( 'bob', string )
self.assertTrue( rule.parse( string ) )
self.assertEquals( string, rule.result )
self.assertEquals( '', rule.leftovers )
def test_calc_leftovers_trim( self ):
rule = self.get_rule( 'bob', ',' )
string = ', bob'
self.assertTrue( rule.parse( string ) )
self.assertEquals( ',', rule.result )
self.assertEquals( 'bob', rule.leftovers )
|
Test for newly refactored literal rule
|
Test for newly refactored literal rule
|
Python
|
mit
|
cmancone/mygrations,cmancone/mygrations
|
Test for newly refactored literal rule
|
import unittest
from mygrations.core.parse.rule_literal import rule_literal
class test_parse_literal( unittest.TestCase ):
def get_rule( self, name, literal ):
return rule_literal( False, { 'name': name, 'value': literal }, {} )
def test_name_not_required( self ):
rule = self.get_rule( '', 'ASDF' )
self.assertEquals( rule.name, 'ASDF' )
def test_value_required( self ):
with self.assertRaises( ValueError ):
self.get_rule( 'bob', '' )
def test_can_init_with_name_and_value( self ):
rule = self.get_rule( 'bob', ',' )
self.assertEquals( rule.name, 'bob' )
self.assertEquals( rule.literal, ',' )
def test_match_beginning_only( self ):
rule = self.get_rule( 'bob', ',' )
self.assertFalse( rule.parse( 'hey,' ) )
self.assertEquals( '', rule.result )
def test_leftovers_is_input_for_no_match( self ):
rule = self.get_rule( 'bob', ',' )
string = 'hey,'
rule.parse( string )
self.assertEquals( string, rule.leftovers )
def test_no_leftovers_for_full_match( self ):
string = '23483438'
rule = self.get_rule( 'bob', string )
self.assertTrue( rule.parse( string ) )
self.assertEquals( string, rule.result )
self.assertEquals( '', rule.leftovers )
def test_calc_leftovers_trim( self ):
rule = self.get_rule( 'bob', ',' )
string = ', bob'
self.assertTrue( rule.parse( string ) )
self.assertEquals( ',', rule.result )
self.assertEquals( 'bob', rule.leftovers )
|
<commit_before><commit_msg>Test for newly refactored literal rule<commit_after>
|
import unittest
from mygrations.core.parse.rule_literal import rule_literal
class test_parse_literal( unittest.TestCase ):
def get_rule( self, name, literal ):
return rule_literal( False, { 'name': name, 'value': literal }, {} )
def test_name_not_required( self ):
rule = self.get_rule( '', 'ASDF' )
self.assertEquals( rule.name, 'ASDF' )
def test_value_required( self ):
with self.assertRaises( ValueError ):
self.get_rule( 'bob', '' )
def test_can_init_with_name_and_value( self ):
rule = self.get_rule( 'bob', ',' )
self.assertEquals( rule.name, 'bob' )
self.assertEquals( rule.literal, ',' )
def test_match_beginning_only( self ):
rule = self.get_rule( 'bob', ',' )
self.assertFalse( rule.parse( 'hey,' ) )
self.assertEquals( '', rule.result )
def test_leftovers_is_input_for_no_match( self ):
rule = self.get_rule( 'bob', ',' )
string = 'hey,'
rule.parse( string )
self.assertEquals( string, rule.leftovers )
def test_no_leftovers_for_full_match( self ):
string = '23483438'
rule = self.get_rule( 'bob', string )
self.assertTrue( rule.parse( string ) )
self.assertEquals( string, rule.result )
self.assertEquals( '', rule.leftovers )
def test_calc_leftovers_trim( self ):
rule = self.get_rule( 'bob', ',' )
string = ', bob'
self.assertTrue( rule.parse( string ) )
self.assertEquals( ',', rule.result )
self.assertEquals( 'bob', rule.leftovers )
|
Test for newly refactored literal ruleimport unittest
from mygrations.core.parse.rule_literal import rule_literal
class test_parse_literal( unittest.TestCase ):
def get_rule( self, name, literal ):
return rule_literal( False, { 'name': name, 'value': literal }, {} )
def test_name_not_required( self ):
rule = self.get_rule( '', 'ASDF' )
self.assertEquals( rule.name, 'ASDF' )
def test_value_required( self ):
with self.assertRaises( ValueError ):
self.get_rule( 'bob', '' )
def test_can_init_with_name_and_value( self ):
rule = self.get_rule( 'bob', ',' )
self.assertEquals( rule.name, 'bob' )
self.assertEquals( rule.literal, ',' )
def test_match_beginning_only( self ):
rule = self.get_rule( 'bob', ',' )
self.assertFalse( rule.parse( 'hey,' ) )
self.assertEquals( '', rule.result )
def test_leftovers_is_input_for_no_match( self ):
rule = self.get_rule( 'bob', ',' )
string = 'hey,'
rule.parse( string )
self.assertEquals( string, rule.leftovers )
def test_no_leftovers_for_full_match( self ):
string = '23483438'
rule = self.get_rule( 'bob', string )
self.assertTrue( rule.parse( string ) )
self.assertEquals( string, rule.result )
self.assertEquals( '', rule.leftovers )
def test_calc_leftovers_trim( self ):
rule = self.get_rule( 'bob', ',' )
string = ', bob'
self.assertTrue( rule.parse( string ) )
self.assertEquals( ',', rule.result )
self.assertEquals( 'bob', rule.leftovers )
|
<commit_before><commit_msg>Test for newly refactored literal rule<commit_after>import unittest
from mygrations.core.parse.rule_literal import rule_literal
class test_parse_literal( unittest.TestCase ):
def get_rule( self, name, literal ):
return rule_literal( False, { 'name': name, 'value': literal }, {} )
def test_name_not_required( self ):
rule = self.get_rule( '', 'ASDF' )
self.assertEquals( rule.name, 'ASDF' )
def test_value_required( self ):
with self.assertRaises( ValueError ):
self.get_rule( 'bob', '' )
def test_can_init_with_name_and_value( self ):
rule = self.get_rule( 'bob', ',' )
self.assertEquals( rule.name, 'bob' )
self.assertEquals( rule.literal, ',' )
def test_match_beginning_only( self ):
rule = self.get_rule( 'bob', ',' )
self.assertFalse( rule.parse( 'hey,' ) )
self.assertEquals( '', rule.result )
def test_leftovers_is_input_for_no_match( self ):
rule = self.get_rule( 'bob', ',' )
string = 'hey,'
rule.parse( string )
self.assertEquals( string, rule.leftovers )
def test_no_leftovers_for_full_match( self ):
string = '23483438'
rule = self.get_rule( 'bob', string )
self.assertTrue( rule.parse( string ) )
self.assertEquals( string, rule.result )
self.assertEquals( '', rule.leftovers )
def test_calc_leftovers_trim( self ):
rule = self.get_rule( 'bob', ',' )
string = ', bob'
self.assertTrue( rule.parse( string ) )
self.assertEquals( ',', rule.result )
self.assertEquals( 'bob', rule.leftovers )
|
|
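Editor's note: the commit adds only the tests. As a hedged sketch, and not the project's actual implementation, here is a minimal literal rule that satisfies the behaviour these tests exercise: the name falls back to the literal, an empty value raises ValueError, matching is prefix-only, and leftovers are whitespace-trimmed on a match.
class sketch_rule_literal(object):
    def __init__(self, parser, rule, rules):
        self.literal = rule.get('value', '')
        if not self.literal:
            raise ValueError('Missing literal value for rule')
        self.name = rule.get('name') or self.literal
        self.result = ''
        self.leftovers = ''
    def parse(self, string):
        if string.startswith(self.literal):
            self.result = self.literal
            self.leftovers = string[len(self.literal):].strip()
            return True
        self.result = ''
        self.leftovers = string
        return False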
a72516f4faae6993d55b7a542ef9b686c6e659fb
|
bot/action/core/command/no_command.py
|
bot/action/core/command/no_command.py
|
from bot.action.core.action import IntermediateAction
from bot.action.core.command import CommandAction
class NoCommandAction(IntermediateAction):
def process(self, event):
for entity in self.get_entities(event):
if self.is_valid_command(entity):
break
else:
self._continue(event)
@staticmethod
def get_entities(event):
return CommandAction.get_entities(event)
@staticmethod
def is_valid_command(entity):
return CommandAction.is_valid_command(entity)
|
Add NoCommandAction to only continue execution when a non-command text event is received
|
Add NoCommandAction to only continue execution when a non-command text event is received
|
Python
|
agpl-3.0
|
alvarogzp/telegram-bot,alvarogzp/telegram-bot
|
Add NoCommandAction to only continue execution when a non-command text event is received
|
from bot.action.core.action import IntermediateAction
from bot.action.core.command import CommandAction
class NoCommandAction(IntermediateAction):
def process(self, event):
for entity in self.get_entities(event):
if self.is_valid_command(entity):
break
else:
self._continue(event)
@staticmethod
def get_entities(event):
return CommandAction.get_entities(event)
@staticmethod
def is_valid_command(entity):
return CommandAction.is_valid_command(entity)
|
<commit_before><commit_msg>Add NoCommandAction to only continue execution when a non-command text event is received<commit_after>
|
from bot.action.core.action import IntermediateAction
from bot.action.core.command import CommandAction
class NoCommandAction(IntermediateAction):
def process(self, event):
for entity in self.get_entities(event):
if self.is_valid_command(entity):
break
else:
self._continue(event)
@staticmethod
def get_entities(event):
return CommandAction.get_entities(event)
@staticmethod
def is_valid_command(entity):
return CommandAction.is_valid_command(entity)
|
Add NoCommandAction to only continue execution when a non-command text event is receivedfrom bot.action.core.action import IntermediateAction
from bot.action.core.command import CommandAction
class NoCommandAction(IntermediateAction):
def process(self, event):
for entity in self.get_entities(event):
if self.is_valid_command(entity):
break
else:
self._continue(event)
@staticmethod
def get_entities(event):
return CommandAction.get_entities(event)
@staticmethod
def is_valid_command(entity):
return CommandAction.is_valid_command(entity)
|
<commit_before><commit_msg>Add NoCommandAction to only continue execution when a non-command text event is received<commit_after>from bot.action.core.action import IntermediateAction
from bot.action.core.command import CommandAction
class NoCommandAction(IntermediateAction):
def process(self, event):
for entity in self.get_entities(event):
if self.is_valid_command(entity):
break
else:
self._continue(event)
@staticmethod
def get_entities(event):
return CommandAction.get_entities(event)
@staticmethod
def is_valid_command(entity):
return CommandAction.is_valid_command(entity)
|
|
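Editor's note: process() above relies on Python's for/else idiom, where the else branch runs only if the loop finished without hitting break. A tiny, self-contained illustration; the dict-shaped entities are hypothetical stand-ins for the real entity objects.
def has_command(entities):
    # Break out as soon as a bot_command entity is found; the else branch
    # runs only when the loop completes without breaking.
    for entity in entities:
        if entity.get('type') == 'bot_command':
            break
    else:
        return False
    return True
assert has_command([{'type': 'url'}, {'type': 'bot_command'}]) is True
assert has_command([{'type': 'url'}]) is False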
e41ce4338334794466ba6918fc3b8a1f118d6b41
|
py/testdir_multi_jvm/test_gbm_prostate.py
|
py/testdir_multi_jvm/test_gbm_prostate.py
|
import sys
sys.path.insert(1, '../../h2o-py/src/main/py')
from h2o import H2OConnection
from h2o import H2OFrame
from h2o import H2OGBM
from tabulate import tabulate
######################################################
# Parse command-line args.
#
# usage: python test_name.py --usecloud ipaddr:port
#
ip_port = sys.argv[2].split(":")
print ip_port
ip = ip_port[0]
port = int(ip_port[1])
######################################################
#
# Sample Running GBM on prostate.csv
# Connect to a pre-existing cluster
cluster = H2OConnection(ip = ip, port = port)
df = H2OFrame(remote_fname="../../../smalldata/logreg/prostate.csv")
print df.describe()
# Remove ID from training frame
del df['ID']
# For VOL & GLEASON, a zero really means "missing"
vol = df['VOL']
vol[vol==0] = None
gle = df['GLEASON']
gle[gle==0] = None
# Convert CAPSULE to a logical factor
df['CAPSULE'] = df['CAPSULE'].asfactor()
# Test/train split
r = vol.runif()
train = df[r< 0.8]
test = df[r>=0.8]
# See that the data is ready
print train.describe()
print test .describe()
# Run GBM
gbm = H2OGBM(dataset=train,x="CAPSULE",validation_dataset=test,ntrees=50,max_depth=5,learn_rate=0.1)
#print gbm._model
mm = gbm.metrics()
mm0 = mm[0]
cm = mm0['cm']
conf = cm['confusion_matrix']
print tabulate(conf)
|
Add first example test of using h2o python API to gradle build regression suite.
|
Add first example test of using h2o python API to gradle build regression suite.
|
Python
|
apache-2.0
|
mrgloom/h2o-3,brightchen/h2o-3,kyoren/https-github.com-h2oai-h2o-3,nilbody/h2o-3,madmax983/h2o-3,jangorecki/h2o-3,pchmieli/h2o-3,kyoren/https-github.com-h2oai-h2o-3,madmax983/h2o-3,YzPaul3/h2o-3,ChristosChristofidis/h2o-3,brightchen/h2o-3,jangorecki/h2o-3,michalkurka/h2o-3,printedheart/h2o-3,h2oai/h2o-3,PawarPawan/h2o-v3,ChristosChristofidis/h2o-3,kyoren/https-github.com-h2oai-h2o-3,brightchen/h2o-3,datachand/h2o-3,spennihana/h2o-3,bikash/h2o-dev,tarasane/h2o-3,h2oai/h2o-3,pchmieli/h2o-3,weaver-viii/h2o-3,h2oai/h2o-3,bospetersen/h2o-3,madmax983/h2o-3,bospetersen/h2o-3,pchmieli/h2o-3,mathemage/h2o-3,nilbody/h2o-3,jangorecki/h2o-3,bikash/h2o-dev,nilbody/h2o-3,datachand/h2o-3,spennihana/h2o-3,madmax983/h2o-3,nilbody/h2o-3,madmax983/h2o-3,junwucs/h2o-3,mathemage/h2o-3,PawarPawan/h2o-v3,tarasane/h2o-3,YzPaul3/h2o-3,ChristosChristofidis/h2o-3,bikash/h2o-dev,ChristosChristofidis/h2o-3,YzPaul3/h2o-3,h2oai/h2o-dev,PawarPawan/h2o-v3,junwucs/h2o-3,datachand/h2o-3,bospetersen/h2o-3,jangorecki/h2o-3,YzPaul3/h2o-3,weaver-viii/h2o-3,kyoren/https-github.com-h2oai-h2o-3,printedheart/h2o-3,h2oai/h2o-3,madmax983/h2o-3,datachand/h2o-3,printedheart/h2o-3,junwucs/h2o-3,YzPaul3/h2o-3,bospetersen/h2o-3,jangorecki/h2o-3,bikash/h2o-dev,junwucs/h2o-3,h2oai/h2o-dev,michalkurka/h2o-3,ChristosChristofidis/h2o-3,michalkurka/h2o-3,tarasane/h2o-3,PawarPawan/h2o-v3,mathemage/h2o-3,jangorecki/h2o-3,michalkurka/h2o-3,brightchen/h2o-3,tarasane/h2o-3,bikash/h2o-dev,kyoren/https-github.com-h2oai-h2o-3,weaver-viii/h2o-3,michalkurka/h2o-3,mathemage/h2o-3,mrgloom/h2o-3,jangorecki/h2o-3,datachand/h2o-3,michalkurka/h2o-3,printedheart/h2o-3,mrgloom/h2o-3,bospetersen/h2o-3,nilbody/h2o-3,bikash/h2o-dev,weaver-viii/h2o-3,pchmieli/h2o-3,h2oai/h2o-3,tarasane/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,pchmieli/h2o-3,junwucs/h2o-3,printedheart/h2o-3,weaver-viii/h2o-3,spennihana/h2o-3,spennihana/h2o-3,datachand/h2o-3,brightchen/h2o-3,nilbody/h2o-3,junwucs/h2o-3,mathemage/h2o-3,printedheart/h2o-3,mathemage/h2o-3,bospetersen/h2o-3,spennihana/h2o-3,tarasane/h2o-3,pchmieli/h2o-3,junwucs/h2o-3,madmax983/h2o-3,mrgloom/h2o-3,printedheart/h2o-3,PawarPawan/h2o-v3,h2oai/h2o-3,PawarPawan/h2o-v3,YzPaul3/h2o-3,spennihana/h2o-3,kyoren/https-github.com-h2oai-h2o-3,michalkurka/h2o-3,mathemage/h2o-3,bospetersen/h2o-3,ChristosChristofidis/h2o-3,mrgloom/h2o-3,h2oai/h2o-3,brightchen/h2o-3,h2oai/h2o-dev,kyoren/https-github.com-h2oai-h2o-3,datachand/h2o-3,mrgloom/h2o-3,h2oai/h2o-3,PawarPawan/h2o-v3,weaver-viii/h2o-3,h2oai/h2o-dev,nilbody/h2o-3,spennihana/h2o-3,tarasane/h2o-3,brightchen/h2o-3,YzPaul3/h2o-3,weaver-viii/h2o-3,h2oai/h2o-dev,ChristosChristofidis/h2o-3,mrgloom/h2o-3,pchmieli/h2o-3
|
Add first example test of using h2o python API to gradle build regression suite.
|
import sys
sys.path.insert(1, '../../h2o-py/src/main/py')
from h2o import H2OConnection
from h2o import H2OFrame
from h2o import H2OGBM
from tabulate import tabulate
######################################################
# Parse command-line args.
#
# usage: python test_name.py --usecloud ipaddr:port
#
ip_port = sys.argv[2].split(":")
print ip_port
ip = ip_port[0]
port = int(ip_port[1])
######################################################
#
# Sample Running GBM on prostate.csv
# Connect to a pre-existing cluster
cluster = H2OConnection(ip = ip, port = port)
df = H2OFrame(remote_fname="../../../smalldata/logreg/prostate.csv")
print df.describe()
# Remove ID from training frame
del df['ID']
# For VOL & GLEASON, a zero really means "missing"
vol = df['VOL']
vol[vol==0] = None
gle = df['GLEASON']
gle[gle==0] = None
# Convert CAPSULE to a logical factor
df['CAPSULE'] = df['CAPSULE'].asfactor()
# Test/train split
r = vol.runif()
train = df[r< 0.8]
test = df[r>=0.8]
# See that the data is ready
print train.describe()
print test .describe()
# Run GBM
gbm = H2OGBM(dataset=train,x="CAPSULE",validation_dataset=test,ntrees=50,max_depth=5,learn_rate=0.1)
#print gbm._model
mm = gbm.metrics()
mm0 = mm[0]
cm = mm0['cm']
conf = cm['confusion_matrix']
print tabulate(conf)
|
<commit_before><commit_msg>Add first example test of using h2o python API to gradle build regression suite.<commit_after>
|
import sys
sys.path.insert(1, '../../h2o-py/src/main/py')
from h2o import H2OConnection
from h2o import H2OFrame
from h2o import H2OGBM
from tabulate import tabulate
######################################################
# Parse command-line args.
#
# usage: python test_name.py --usecloud ipaddr:port
#
ip_port = sys.argv[2].split(":")
print ip_port
ip = ip_port[0]
port = int(ip_port[1])
######################################################
#
# Sample Running GBM on prostate.csv
# Connect to a pre-existing cluster
cluster = H2OConnection(ip = ip, port = port)
df = H2OFrame(remote_fname="../../../smalldata/logreg/prostate.csv")
print df.describe()
# Remove ID from training frame
del df['ID']
# For VOL & GLEASON, a zero really means "missing"
vol = df['VOL']
vol[vol==0] = None
gle = df['GLEASON']
gle[gle==0] = None
# Convert CAPSULE to a logical factor
df['CAPSULE'] = df['CAPSULE'].asfactor()
# Test/train split
r = vol.runif()
train = df[r< 0.8]
test = df[r>=0.8]
# See that the data is ready
print train.describe()
print test .describe()
# Run GBM
gbm = H2OGBM(dataset=train,x="CAPSULE",validation_dataset=test,ntrees=50,max_depth=5,learn_rate=0.1)
#print gbm._model
mm = gbm.metrics()
mm0 = mm[0]
cm = mm0['cm']
conf = cm['confusion_matrix']
print tabulate(conf)
|
Add first example test of using h2o python API to gradle build regression suite.
import sys
sys.path.insert(1, '../../h2o-py/src/main/py')
from h2o import H2OConnection
from h2o import H2OFrame
from h2o import H2OGBM
from tabulate import tabulate
######################################################
# Parse command-line args.
#
# usage: python test_name.py --usecloud ipaddr:port
#
ip_port = sys.argv[2].split(":")
print ip_port
ip = ip_port[0]
port = int(ip_port[1])
######################################################
#
# Sample Running GBM on prostate.csv
# Connect to a pre-existing cluster
cluster = H2OConnection(ip = ip, port = port)
df = H2OFrame(remote_fname="../../../smalldata/logreg/prostate.csv")
print df.describe()
# Remove ID from training frame
del df['ID']
# For VOL & GLEASON, a zero really means "missing"
vol = df['VOL']
vol[vol==0] = None
gle = df['GLEASON']
gle[gle==0] = None
# Convert CAPSULE to a logical factor
df['CAPSULE'] = df['CAPSULE'].asfactor()
# Test/train split
r = vol.runif()
train = df[r< 0.8]
test = df[r>=0.8]
# See that the data is ready
print train.describe()
print test .describe()
# Run GBM
gbm = H2OGBM(dataset=train,x="CAPSULE",validation_dataset=test,ntrees=50,max_depth=5,learn_rate=0.1)
#print gbm._model
mm = gbm.metrics()
mm0 = mm[0]
cm = mm0['cm']
conf = cm['confusion_matrix']
print tabulate(conf)
|
<commit_before><commit_msg>Add first example test of using h2o python API to gradle build regression suite.<commit_after>import sys
sys.path.insert(1, '../../h2o-py/src/main/py')
from h2o import H2OConnection
from h2o import H2OFrame
from h2o import H2OGBM
from tabulate import tabulate
######################################################
# Parse command-line args.
#
# usage: python test_name.py --usecloud ipaddr:port
#
ip_port = sys.argv[2].split(":")
print ip_port
ip = ip_port[0]
port = int(ip_port[1])
######################################################
#
# Sample Running GBM on prostate.csv
# Connect to a pre-existing cluster
cluster = H2OConnection(ip = ip, port = port)
df = H2OFrame(remote_fname="../../../smalldata/logreg/prostate.csv")
print df.describe()
# Remove ID from training frame
del df['ID']
# For VOL & GLEASON, a zero really means "missing"
vol = df['VOL']
vol[vol==0] = None
gle = df['GLEASON']
gle[gle==0] = None
# Convert CAPSULE to a logical factor
df['CAPSULE'] = df['CAPSULE'].asfactor()
# Test/train split
r = vol.runif()
train = df[r< 0.8]
test = df[r>=0.8]
# See that the data is ready
print train.describe()
print test .describe()
# Run GBM
gbm = H2OGBM(dataset=train,x="CAPSULE",validation_dataset=test,ntrees=50,max_depth=5,learn_rate=0.1)
#print gbm._model
mm = gbm.metrics()
mm0 = mm[0]
cm = mm0['cm']
conf = cm['confusion_matrix']
print tabulate(conf)
|
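The script above slices sys.argv by position, which silently breaks if the flag order changes. A small standard-library alternative for the same ip:port parsing is sketched below; the flag name mirrors the usage comment in the test but is otherwise an assumption, not part of the recorded code.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--usecloud', metavar='IP:PORT', required=True,
                    help='address of a running H2O cloud, e.g. 127.0.0.1:54321')
# Parsing an explicit list here so the sketch runs outside the test harness.
args = parser.parse_args(['--usecloud', '127.0.0.1:54321'])
ip, port = args.usecloud.split(':')
port = int(port)
print(ip, port)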
|
c27685da10c85cb9876b4c73012da3ebff1915dc
|
codingame/easy/horse-racing_duals.py
|
codingame/easy/horse-racing_duals.py
|
N = int(raw_input())
lst = []
# Read the list
for i in xrange(N):
lst.append(int(raw_input()))
# Sort the list, ascending order
a = sorted(lst)
# Find the min difference
print min(y-x for x,y in zip(a, a[1:]))
|
Add exercise horse racing duals
|
Add exercise horse racing duals
|
Python
|
mit
|
AntoineAugusti/katas,AntoineAugusti/katas,AntoineAugusti/katas
|
Add exercise horse racing duals
|
N = int(raw_input())
lst = []
# Read the list
for i in xrange(N):
lst.append(int(raw_input()))
# Sort the list, ascending order
a = sorted(lst)
# Find the min difference
print min(y-x for x,y in zip(a, a[1:]))
|
<commit_before><commit_msg>Add exercise horse racing duals<commit_after>
|
N = int(raw_input())
lst = []
# Read the list
for i in xrange(N):
lst.append(int(raw_input()))
# Sort the list, ascending order
a = sorted(lst)
# Find the min difference
print min(y-x for x,y in zip(a, a[1:]))
|
Add exercise horse racing duals
N = int(raw_input())
lst = []
# Read the list
for i in xrange(N):
lst.append(int(raw_input()))
# Sort the list, ascending order
a = sorted(lst)
# Find the min difference
print min(y-x for x,y in zip(a, a[1:]))
|
<commit_before><commit_msg>Add exercise horse racing duals<commit_after>N = int(raw_input())
lst = []
# Read the list
for i in xrange(N):
lst.append(int(raw_input()))
# Sort the list, ascending order
a = sorted(lst)
# Find the min difference
print min(y-x for x,y in zip(a, a[1:]))
|
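The key observation behind this solution is that after sorting, the closest pair of strengths must be adjacent, so one linear pass over consecutive pairs is enough. The same idea on a fixed sample list, written with Python 3 built-ins since the recorded solution targets Python 2:

strengths = [5, 8, 9, 3]
a = sorted(strengths)
# zip(a, a[1:]) pairs every element with its successor in sorted order.
closest = min(y - x for x, y in zip(a, a[1:]))
print(closest)  # 1, the gap between 8 and 9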
|
717f3c5d4babe9feeb4e0d82fb2ea839d735c4b4
|
test/backend/test_database/test_common.py
|
test/backend/test_database/test_common.py
|
import mock
from linkr import db
import database.common
from test.backend.test_case import LinkrTestCase
class TestCommon(LinkrTestCase):
def test_create_tables(self):
with mock.patch.object(db, 'create_all') as mock_create:
database.common.create_tables()
self.assertTrue(mock_create.called)
|
Test database.common for full coverage of database
|
Test database.common for full coverage of database
|
Python
|
mit
|
LINKIWI/linkr,LINKIWI/linkr,LINKIWI/linkr
|
Test database.common for full coverage of database
|
import mock
from linkr import db
import database.common
from test.backend.test_case import LinkrTestCase
class TestCommon(LinkrTestCase):
def test_create_tables(self):
with mock.patch.object(db, 'create_all') as mock_create:
database.common.create_tables()
self.assertTrue(mock_create.called)
|
<commit_before><commit_msg>Test database.common for full coverage of database<commit_after>
|
import mock
from linkr import db
import database.common
from test.backend.test_case import LinkrTestCase
class TestCommon(LinkrTestCase):
def test_create_tables(self):
with mock.patch.object(db, 'create_all') as mock_create:
database.common.create_tables()
self.assertTrue(mock_create.called)
|
Test database.common for full coverage of database
import mock
from linkr import db
import database.common
from test.backend.test_case import LinkrTestCase
class TestCommon(LinkrTestCase):
def test_create_tables(self):
with mock.patch.object(db, 'create_all') as mock_create:
database.common.create_tables()
self.assertTrue(mock_create.called)
|
<commit_before><commit_msg>Test database.common for full coverage of database<commit_after>import mock
from linkr import db
import database.common
from test.backend.test_case import LinkrTestCase
class TestCommon(LinkrTestCase):
def test_create_tables(self):
with mock.patch.object(db, 'create_all') as mock_create:
database.common.create_tables()
self.assertTrue(mock_create.called)
|
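The patch-and-assert pattern above is independent of Linkr itself. A self-contained sketch using only unittest.mock, with a throwaway stand-in for the db object:

from unittest import mock

class FakeDB(object):
    def create_all(self):
        raise RuntimeError('should never run under test')

db = FakeDB()

def create_tables():
    db.create_all()

with mock.patch.object(db, 'create_all') as mock_create:
    create_tables()
    assert mock_create.called  # the real create_all was replaced, not executed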
|
4316122225d2e523ff310f65479ea676e0aa02e3
|
load_data_sets.py
|
load_data_sets.py
|
import os
import numpy as np
import sgf_wrapper
def load_sgf_positions(*dataset_names):
for dataset in dataset_names:
dataset_dir = os.path.join(os.getcwd(), 'data', dataset)
dataset_files = [os.path.join(dataset_dir, name) for name in os.listdir(dataset_dir)]
all_datafiles = filter(os.path.isfile, dataset_files)
for file in all_datafiles:
with open(file) as f:
sgf = sgf_wrapper.SgfWrapper(f.read())
for position_w_context in sgf.get_main_branch():
if position_w_context.is_usable():
yield position_w_context
def extract_features(features, positions):
num_feature_planes = sum(f.planes for f in features)
num_positions = len(positions)
output = np.zeros([num_positions, 19, 19, num_feature_planes])
for i, pos in enumerate(positions):
output[i] = np.concatenate([feature.extract(pos) for feature in features], axis=2)
return output
|
Add methods for loading data sets
|
Add methods for loading data sets
|
Python
|
apache-2.0
|
brilee/MuGo
|
Add methods for loading data sets
|
import os
import numpy as np
import sgf_wrapper
def load_sgf_positions(*dataset_names):
for dataset in dataset_names:
dataset_dir = os.path.join(os.getcwd(), 'data', dataset)
dataset_files = [os.path.join(dataset_dir, name) for name in os.listdir(dataset_dir)]
all_datafiles = filter(os.path.isfile, dataset_files)
for file in all_datafiles:
with open(file) as f:
sgf = sgf_wrapper.SgfWrapper(f.read())
for position_w_context in sgf.get_main_branch():
if position_w_context.is_usable():
yield position_w_context
def extract_features(features, positions):
num_feature_planes = sum(f.planes for f in features)
num_positions = len(positions)
output = np.zeros([num_positions, 19, 19, num_feature_planes])
for i, pos in enumerate(positions):
output[i] = np.concatenate([feature.extract(pos) for feature in features], axis=2)
return output
|
<commit_before><commit_msg>Add methods for loading data sets<commit_after>
|
import os
import numpy as np
import sgf_wrapper
def load_sgf_positions(*dataset_names):
for dataset in dataset_names:
dataset_dir = os.path.join(os.getcwd(), 'data', dataset)
dataset_files = [os.path.join(dataset_dir, name) for name in os.listdir(dataset_dir)]
all_datafiles = filter(os.path.isfile, dataset_files)
for file in all_datafiles:
with open(file) as f:
sgf = sgf_wrapper.SgfWrapper(f.read())
for position_w_context in sgf.get_main_branch():
if position_w_context.is_usable():
yield position_w_context
def extract_features(features, positions):
num_feature_planes = sum(f.planes for f in features)
num_positions = len(positions)
output = np.zeros([num_positions, 19, 19, num_feature_planes])
for i, pos in enumerate(positions):
output[i] = np.concatenate([feature.extract(pos) for feature in features], axis=2)
return output
|
Add methods for loading data sets
import os
import numpy as np
import sgf_wrapper
def load_sgf_positions(*dataset_names):
for dataset in dataset_names:
dataset_dir = os.path.join(os.getcwd(), 'data', dataset)
dataset_files = [os.path.join(dataset_dir, name) for name in os.listdir(dataset_dir)]
all_datafiles = filter(os.path.isfile, dataset_files)
for file in all_datafiles:
with open(file) as f:
sgf = sgf_wrapper.SgfWrapper(f.read())
for position_w_context in sgf.get_main_branch():
if position_w_context.is_usable():
yield position_w_context
def extract_features(features, positions):
num_feature_planes = sum(f.planes for f in features)
num_positions = len(positions)
output = np.zeros([num_positions, 19, 19, num_feature_planes])
for i, pos in enumerate(positions):
output[i] = np.concatenate([feature.extract(pos) for feature in features], axis=2)
return output
|
<commit_before><commit_msg>Add methods for loading data sets<commit_after>import os
import numpy as np
import sgf_wrapper
def load_sgf_positions(*dataset_names):
for dataset in dataset_names:
dataset_dir = os.path.join(os.getcwd(), 'data', dataset)
dataset_files = [os.path.join(dataset_dir, name) for name in os.listdir(dataset_dir)]
all_datafiles = filter(os.path.isfile, dataset_files)
for file in all_datafiles:
with open(file) as f:
sgf = sgf_wrapper.SgfWrapper(f.read())
for position_w_context in sgf.get_main_branch():
if position_w_context.is_usable():
yield position_w_context
def extract_features(features, positions):
num_feature_planes = sum(f.planes for f in features)
num_positions = len(positions)
output = np.zeros([num_positions, 19, 19, num_feature_planes])
for i, pos in enumerate(positions):
output[i] = np.concatenate([feature.extract(pos) for feature in features], axis=2)
return output
|
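extract_features stacks each feature's planes along the last axis, so the output width is the sum of the per-feature plane counts. A toy run with fake feature objects shows the expected shapes; only the 19x19 board size comes from the code above, the rest is made up for illustration.

import numpy as np

class FakeFeature(object):
    def __init__(self, planes):
        self.planes = planes
    def extract(self, position):
        # A real feature would derive its planes from the position; zeros suffice here.
        return np.zeros([19, 19, self.planes])

features = [FakeFeature(1), FakeFeature(3)]
positions = ['pos0', 'pos1']
output = np.zeros([len(positions), 19, 19, sum(f.planes for f in features)])
for i, pos in enumerate(positions):
    output[i] = np.concatenate([f.extract(pos) for f in features], axis=2)
print(output.shape)  # (2, 19, 19, 4)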
|
612fb44b33c4f52488f3565c009188d61a8343c2
|
python/autojoin_on_invite.py
|
python/autojoin_on_invite.py
|
__module_name__ = "autojoin on invite"
__module_version__ = "1.0"
import hexchat
def join(word, word_eol, userdata):
hexchat.command('join ' + word[0])
hexchat.hook_print('Invited', join)
|
Add an auto join script
|
Add an auto join script
|
Python
|
apache-2.0
|
arai-wa/hexchat-addons
|
Add an auto join script
|
__module_name__ = "autojoin on invite"
__module_version__ = "1.0"
import hexchat
def join(word, word_eol, userdata):
hexchat.command('join ' + word[0])
hexchat.hook_print('Invited', join)
|
<commit_before><commit_msg>Add an auto join script<commit_after>
|
__module_name__ = "autojoin on invite"
__module_version__ = "1.0"
import hexchat
def join(word, word_eol, userdata):
hexchat.command('join ' + word[0])
hexchat.hook_print('Invited', join)
|
Add an auto join script
__module_name__ = "autojoin on invite"
__module_version__ = "1.0"
import hexchat
def join(word, word_eol, userdata):
hexchat.command('join ' + word[0])
hexchat.hook_print('Invited', join)
|
<commit_before><commit_msg>Add an auto join script<commit_after>__module_name__ = "autojoin on invite"
__module_version__ = "1.0"
import hexchat
def join(word, word_eol, userdata):
hexchat.command('join ' + word[0])
hexchat.hook_print('Invited', join)
|
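The callback only ever runs inside HexChat, but its logic can be exercised from a plain interpreter by substituting a stub for the hexchat module. Everything below is an illustrative stand-in, not the real plugin API.

import sys
import types

fake_hexchat = types.ModuleType('hexchat')
fake_hexchat.issued = []
fake_hexchat.command = fake_hexchat.issued.append
sys.modules['hexchat'] = fake_hexchat  # so `import hexchat` would pick up the stub

def join(word, word_eol, userdata):
    fake_hexchat.command('join ' + word[0])

# Simulate an Invited print event whose first word is the channel name.
join(['#example', 'inviter', 'irc.example.net'], [], None)
print(fake_hexchat.issued)  # ['join #example']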
|
3fd3b376b1334dba0ffea3641dcbb32d788f4083
|
scripts/fix_templated_orphans.py
|
scripts/fix_templated_orphans.py
|
# -*- coding: utf-8 -*-
"""Find orphaned templated nodes without parents, then attempt to identify and
restore their parent nodes. Due to a bug in templating that has since been
fixed, several templated nodes were not attached to the `nodes` lists of their
parents.
"""
import logging
from modularodm import Q
from framework.auth import Auth
from website.models import Node
from website.app import init_app
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, NodeFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def find_candidate_parents(node):
return Node.find(
Q('logs', 'eq', node.logs[0]._id) &
Q('is_fork', 'eq', node.is_fork) &
Q('is_registration', 'eq', node.is_registration)
)
def resolve_templated_orphan(orphan):
candidate_parents = find_candidate_parents(orphan)
if candidate_parents.count() != 1:
logger.warn('Could not identify unique candidate parent for node {}'.format(orphan._id))
return
if candidate_parents[0].date_created != orphan.date_created:
logger.warn('Creation dates of candidate parent and orphan {} did not match'.format(orphan._id))
logger.info('Adding orphan to `nodes` list of candidate parent')
candidate_parents[0].nodes.append(orphan)
candidate_parents[0].save()
def find_templated_orphans():
return Node.find(
Q('template_node', 'ne', None) &
Q('category', 'ne', 'project') &
Q('__backrefs.parent.node.nodes.0', 'exists', False)
)
if __name__ == '__main__':
init_app()
orphans = find_templated_orphans()
for orphan in orphans:
resolve_templated_orphan(orphan)
class TestResolveTemplatedOrphans(OsfTestCase):
def setUp(self):
super(TestResolveTemplatedOrphans, self).setUp()
self.node = NodeFactory()
self.project = ProjectFactory(creator=self.node.creator)
self.project.nodes.append(self.node)
self.project.save()
self.templated_project = self.project.use_as_template(
Auth(self.node.creator)
)
self.templated_node = self.templated_project.nodes[0]
self.templated_project.nodes = []
self.templated_project.save()
def test_find(self):
orphans = find_templated_orphans()
assert_equal(orphans.count(), 1)
assert_equal(orphans[0], self.templated_node)
def test_resolve(self):
assert_not_in(self.templated_node, self.templated_project.nodes)
resolve_templated_orphan(self.node)
assert_in(self.node, self.project.nodes)
|
Add migration script to fix templated orphans.
|
Add migration script to fix templated orphans.
Restore parents to orphaned nodes created during templating.
See scripts/fix_templated_orphans.py for details.
|
Python
|
apache-2.0
|
amyshi188/osf.io,fabianvf/osf.io,mluke93/osf.io,GaryKriebel/osf.io,KAsante95/osf.io,ckc6cz/osf.io,revanthkolli/osf.io,cslzchen/osf.io,Nesiehr/osf.io,zkraime/osf.io,reinaH/osf.io,mluke93/osf.io,haoyuchen1992/osf.io,ckc6cz/osf.io,asanfilippo7/osf.io,laurenrevere/osf.io,zkraime/osf.io,aaxelb/osf.io,emetsger/osf.io,Ghalko/osf.io,HarryRybacki/osf.io,billyhunt/osf.io,monikagrabowska/osf.io,fabianvf/osf.io,rdhyee/osf.io,zachjanicki/osf.io,laurenrevere/osf.io,Johnetordoff/osf.io,ticklemepierce/osf.io,samchrisinger/osf.io,jinluyuan/osf.io,adlius/osf.io,samchrisinger/osf.io,arpitar/osf.io,doublebits/osf.io,GageGaskins/osf.io,KAsante95/osf.io,Nesiehr/osf.io,jmcarp/osf.io,dplorimer/osf,felliott/osf.io,bdyetton/prettychart,cwisecarver/osf.io,billyhunt/osf.io,pattisdr/osf.io,billyhunt/osf.io,sbt9uc/osf.io,caseyrygt/osf.io,mfraezz/osf.io,zachjanicki/osf.io,GaryKriebel/osf.io,Ghalko/osf.io,felliott/osf.io,brandonPurvis/osf.io,asanfilippo7/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,brandonPurvis/osf.io,lyndsysimon/osf.io,doublebits/osf.io,petermalcolm/osf.io,jmcarp/osf.io,rdhyee/osf.io,zachjanicki/osf.io,wearpants/osf.io,mattclark/osf.io,wearpants/osf.io,kushG/osf.io,emetsger/osf.io,jmcarp/osf.io,mluo613/osf.io,alexschiller/osf.io,binoculars/osf.io,sloria/osf.io,chrisseto/osf.io,lamdnhan/osf.io,danielneis/osf.io,samchrisinger/osf.io,kch8qx/osf.io,njantrania/osf.io,Johnetordoff/osf.io,zamattiac/osf.io,chrisseto/osf.io,doublebits/osf.io,danielneis/osf.io,TomHeatwole/osf.io,mattclark/osf.io,ckc6cz/osf.io,chrisseto/osf.io,icereval/osf.io,mluo613/osf.io,mluke93/osf.io,kch8qx/osf.io,samanehsan/osf.io,revanthkolli/osf.io,cwisecarver/osf.io,mluo613/osf.io,lyndsysimon/osf.io,GageGaskins/osf.io,erinspace/osf.io,himanshuo/osf.io,kch8qx/osf.io,MerlinZhang/osf.io,KAsante95/osf.io,CenterForOpenScience/osf.io,acshi/osf.io,brandonPurvis/osf.io,lamdnhan/osf.io,kwierman/osf.io,hmoco/osf.io,njantrania/osf.io,aaxelb/osf.io,acshi/osf.io,kushG/osf.io,brianjgeiger/osf.io,hmoco/osf.io,himanshuo/osf.io,ticklemepierce/osf.io,chrisseto/osf.io,alexschiller/osf.io,sloria/osf.io,caneruguz/osf.io,haoyuchen1992/osf.io,MerlinZhang/osf.io,acshi/osf.io,monikagrabowska/osf.io,TomHeatwole/osf.io,adlius/osf.io,erinspace/osf.io,adlius/osf.io,arpitar/osf.io,caneruguz/osf.io,jeffreyliu3230/osf.io,CenterForOpenScience/osf.io,asanfilippo7/osf.io,kwierman/osf.io,jolene-esposito/osf.io,jeffreyliu3230/osf.io,jnayak1/osf.io,monikagrabowska/osf.io,SSJohns/osf.io,Nesiehr/osf.io,amyshi188/osf.io,RomanZWang/osf.io,ZobairAlijan/osf.io,felliott/osf.io,RomanZWang/osf.io,wearpants/osf.io,lyndsysimon/osf.io,brandonPurvis/osf.io,laurenrevere/osf.io,crcresearch/osf.io,MerlinZhang/osf.io,jnayak1/osf.io,jmcarp/osf.io,KAsante95/osf.io,SSJohns/osf.io,billyhunt/osf.io,DanielSBrown/osf.io,haoyuchen1992/osf.io,kwierman/osf.io,HarryRybacki/osf.io,kch8qx/osf.io,Ghalko/osf.io,baylee-d/osf.io,caneruguz/osf.io,jeffreyliu3230/osf.io,haoyuchen1992/osf.io,alexschiller/osf.io,barbour-em/osf.io,wearpants/osf.io,DanielSBrown/osf.io,cslzchen/osf.io,binoculars/osf.io,sbt9uc/osf.io,caseyrollins/osf.io,zamattiac/osf.io,barbour-em/osf.io,icereval/osf.io,abought/osf.io,mluke93/osf.io,brianjgeiger/osf.io,cldershem/osf.io,caseyrollins/osf.io,himanshuo/osf.io,ckc6cz/osf.io,DanielSBrown/osf.io,lyndsysimon/osf.io,samanehsan/osf.io,reinaH/osf.io,jeffreyliu3230/osf.io,TomHeatwole/osf.io,jinluyuan/osf.io,jolene-esposito/osf.io,GaryKriebel/osf.io,dplorimer/osf,fabianvf/osf.io,mfraezz/osf.io,cldershem/osf.io,aaxelb/osf.io,njantrania/osf.io,rdhyee/osf.io,danielneis/osf.io,acshi/osf.io,Ghalko/osf.
io,kch8qx/osf.io,icereval/osf.io,cosenal/osf.io,hmoco/osf.io,ticklemepierce/osf.io,TomBaxter/osf.io,pattisdr/osf.io,caneruguz/osf.io,bdyetton/prettychart,cldershem/osf.io,chennan47/osf.io,cosenal/osf.io,erinspace/osf.io,ticklemepierce/osf.io,HalcyonChimera/osf.io,zachjanicki/osf.io,revanthkolli/osf.io,GageGaskins/osf.io,fabianvf/osf.io,abought/osf.io,amyshi188/osf.io,aaxelb/osf.io,RomanZWang/osf.io,CenterForOpenScience/osf.io,brandonPurvis/osf.io,ZobairAlijan/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,GageGaskins/osf.io,HarryRybacki/osf.io,lamdnhan/osf.io,bdyetton/prettychart,DanielSBrown/osf.io,leb2dg/osf.io,jinluyuan/osf.io,crcresearch/osf.io,himanshuo/osf.io,asanfilippo7/osf.io,SSJohns/osf.io,pattisdr/osf.io,monikagrabowska/osf.io,abought/osf.io,amyshi188/osf.io,TomHeatwole/osf.io,SSJohns/osf.io,HalcyonChimera/osf.io,saradbowman/osf.io,petermalcolm/osf.io,emetsger/osf.io,njantrania/osf.io,zkraime/osf.io,mluo613/osf.io,crcresearch/osf.io,cldershem/osf.io,felliott/osf.io,kwierman/osf.io,zamattiac/osf.io,KAsante95/osf.io,jolene-esposito/osf.io,saradbowman/osf.io,jinluyuan/osf.io,caseyrygt/osf.io,rdhyee/osf.io,cslzchen/osf.io,AndrewSallans/osf.io,TomBaxter/osf.io,cwisecarver/osf.io,samchrisinger/osf.io,sloria/osf.io,cosenal/osf.io,alexschiller/osf.io,cslzchen/osf.io,mfraezz/osf.io,mattclark/osf.io,zkraime/osf.io,RomanZWang/osf.io,leb2dg/osf.io,acshi/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,caseyrygt/osf.io,emetsger/osf.io,TomBaxter/osf.io,lamdnhan/osf.io,arpitar/osf.io,cwisecarver/osf.io,doublebits/osf.io,jolene-esposito/osf.io,GageGaskins/osf.io,dplorimer/osf,ZobairAlijan/osf.io,sbt9uc/osf.io,doublebits/osf.io,MerlinZhang/osf.io,leb2dg/osf.io,samanehsan/osf.io,kushG/osf.io,zamattiac/osf.io,kushG/osf.io,caseyrygt/osf.io,samanehsan/osf.io,sbt9uc/osf.io,jnayak1/osf.io,baylee-d/osf.io,monikagrabowska/osf.io,RomanZWang/osf.io,revanthkolli/osf.io,dplorimer/osf,reinaH/osf.io,GaryKriebel/osf.io,leb2dg/osf.io,AndrewSallans/osf.io,chennan47/osf.io,billyhunt/osf.io,petermalcolm/osf.io,jnayak1/osf.io,mfraezz/osf.io,bdyetton/prettychart,danielneis/osf.io,CenterForOpenScience/osf.io,cosenal/osf.io,mluo613/osf.io,chennan47/osf.io,ZobairAlijan/osf.io,binoculars/osf.io,barbour-em/osf.io,reinaH/osf.io,abought/osf.io,adlius/osf.io,arpitar/osf.io,petermalcolm/osf.io,hmoco/osf.io,alexschiller/osf.io,Nesiehr/osf.io,caseyrollins/osf.io,HarryRybacki/osf.io,barbour-em/osf.io
|
Add migration script to fix templated orphans.
Restore parents to orphaned nodes created during templating.
See scripts/fix_templated_orphans.py for details.
|
# -*- coding: utf-8 -*-
"""Find orphaned templated nodes without parents, then attempt to identify and
restore their parent nodes. Due to a bug in templating that has since been
fixed, several templated nodes were not attached to the `nodes` lists of their
parents.
"""
import logging
from modularodm import Q
from framework.auth import Auth
from website.models import Node
from website.app import init_app
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, NodeFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def find_candidate_parents(node):
return Node.find(
Q('logs', 'eq', node.logs[0]._id) &
Q('is_fork', 'eq', node.is_fork) &
Q('is_registration', 'eq', node.is_registration)
)
def resolve_templated_orphan(orphan):
candidate_parents = find_candidate_parents(orphan)
if candidate_parents.count() != 1:
logger.warn('Could not identify unique candidate parent for node {}'.format(orphan._id))
return
if candidate_parents[0].date_created != orphan.date_created:
logger.warn('Creation dates of candidate parent and orphan {} did not match'.format(orphan._id))
logger.info('Adding orphan to `nodes` list of candidate parent')
candidate_parents[0].nodes.append(orphan)
candidate_parents[0].save()
def find_templated_orphans():
return Node.find(
Q('template_node', 'ne', None) &
Q('category', 'ne', 'project') &
Q('__backrefs.parent.node.nodes.0', 'exists', False)
)
if __name__ == '__main__':
init_app()
orphans = find_templated_orphans()
for orphan in orphans:
resolve_templated_orphan(orphan)
class TestResolveTemplatedOrphans(OsfTestCase):
def setUp(self):
super(TestResolveTemplatedOrphans, self).setUp()
self.node = NodeFactory()
self.project = ProjectFactory(creator=self.node.creator)
self.project.nodes.append(self.node)
self.project.save()
self.templated_project = self.project.use_as_template(
Auth(self.node.creator)
)
self.templated_node = self.templated_project.nodes[0]
self.templated_project.nodes = []
self.templated_project.save()
def test_find(self):
orphans = find_templated_orphans()
assert_equal(orphans.count(), 1)
assert_equal(orphans[0], self.templated_node)
def test_resolve(self):
assert_not_in(self.templated_node, self.templated_project.nodes)
resolve_templated_orphan(self.node)
assert_in(self.node, self.project.nodes)
|
<commit_before><commit_msg>Add migration script to fix templated orphans.
Restore parents to orphaned nodes created during templating.
See scripts/fix_templated_orphans.py for details.<commit_after>
|
# -*- coding: utf-8 -*-
"""Find orphaned templated nodes without parents, then attempt to identify and
restore their parent nodes. Due to a bug in templating that has since been
fixed, several templated nodes were not attached to the `nodes` lists of their
parents.
"""
import logging
from modularodm import Q
from framework.auth import Auth
from website.models import Node
from website.app import init_app
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, NodeFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def find_candidate_parents(node):
return Node.find(
Q('logs', 'eq', node.logs[0]._id) &
Q('is_fork', 'eq', node.is_fork) &
Q('is_registration', 'eq', node.is_registration)
)
def resolve_templated_orphan(orphan):
candidate_parents = find_candidate_parents(orphan)
if candidate_parents.count() != 1:
logger.warn('Could not identify unique candidate parent for node {}'.format(orphan._id))
return
if candidate_parents[0].date_created != orphan.date_created:
logger.warn('Creation dates of candidate parent and orphan {} did not match'.format(orphan._id))
logger.info('Adding orphan to `nodes` list of candidate parent')
candidate_parents[0].nodes.append(orphan)
candidate_parents[0].save()
def find_templated_orphans():
return Node.find(
Q('template_node', 'ne', None) &
Q('category', 'ne', 'project') &
Q('__backrefs.parent.node.nodes.0', 'exists', False)
)
if __name__ == '__main__':
init_app()
orphans = find_templated_orphans()
for orphan in orphans:
resolve_templated_orphan(orphan)
class TestResolveTemplatedOrphans(OsfTestCase):
def setUp(self):
super(TestResolveTemplatedOrphans, self).setUp()
self.node = NodeFactory()
self.project = ProjectFactory(creator=self.node.creator)
self.project.nodes.append(self.node)
self.project.save()
self.templated_project = self.project.use_as_template(
Auth(self.node.creator)
)
self.templated_node = self.templated_project.nodes[0]
self.templated_project.nodes = []
self.templated_project.save()
def test_find(self):
orphans = find_templated_orphans()
assert_equal(orphans.count(), 1)
assert_equal(orphans[0], self.templated_node)
def test_resolve(self):
assert_not_in(self.templated_node, self.templated_project.nodes)
resolve_templated_orphan(self.node)
assert_in(self.node, self.project.nodes)
|
Add migration script to fix templated orphans.
Restore parents to orphaned nodes created during templating.
See scripts/fix_templated_orphans.py for details.
# -*- coding: utf-8 -*-
"""Find orphaned templated nodes without parents, then attempt to identify and
restore their parent nodes. Due to a bug in templating that has since been
fixed, several templated nodes were not attached to the `nodes` lists of their
parents.
"""
import logging
from modularodm import Q
from framework.auth import Auth
from website.models import Node
from website.app import init_app
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, NodeFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def find_candidate_parents(node):
return Node.find(
Q('logs', 'eq', node.logs[0]._id) &
Q('is_fork', 'eq', node.is_fork) &
Q('is_registration', 'eq', node.is_registration)
)
def resolve_templated_orphan(orphan):
candidate_parents = find_candidate_parents(orphan)
if candidate_parents.count() != 1:
logger.warn('Could not identify unique candidate parent for node {}'.format(orphan._id))
return
if candidate_parents[0].date_created != orphan.date_created:
logger.warn('Creation dates of candidate parent and orphan {} did not match'.format(orphan._id))
logger.info('Adding orphan to `nodes` list of candidate parent')
candidate_parents[0].nodes.append(orphan)
candidate_parents[0].save()
def find_templated_orphans():
return Node.find(
Q('template_node', 'ne', None) &
Q('category', 'ne', 'project') &
Q('__backrefs.parent.node.nodes.0', 'exists', False)
)
if __name__ == '__main__':
init_app()
orphans = find_templated_orphans()
for orphan in orphans:
resolve_templated_orphan(orphan)
class TestResolveTemplatedOrphans(OsfTestCase):
def setUp(self):
super(TestResolveTemplatedOrphans, self).setUp()
self.node = NodeFactory()
self.project = ProjectFactory(creator=self.node.creator)
self.project.nodes.append(self.node)
self.project.save()
self.templated_project = self.project.use_as_template(
Auth(self.node.creator)
)
self.templated_node = self.templated_project.nodes[0]
self.templated_project.nodes = []
self.templated_project.save()
def test_find(self):
orphans = find_templated_orphans()
assert_equal(orphans.count(), 1)
assert_equal(orphans[0], self.templated_node)
def test_resolve(self):
assert_not_in(self.templated_node, self.templated_project.nodes)
resolve_templated_orphan(self.node)
assert_in(self.node, self.project.nodes)
|
<commit_before><commit_msg>Add migration script to fix templated orphans.
Restore parents to orphaned nodes created during templating.
See scripts/fix_templated_orphans.py for details.<commit_after># -*- coding: utf-8 -*-
"""Find orphaned templated nodes without parents, then attempt to identify and
restore their parent nodes. Due to a bug in templating that has since been
fixed, several templated nodes were not attached to the `nodes` lists of their
parents.
"""
import logging
from modularodm import Q
from framework.auth import Auth
from website.models import Node
from website.app import init_app
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, NodeFactory
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def find_candidate_parents(node):
return Node.find(
Q('logs', 'eq', node.logs[0]._id) &
Q('is_fork', 'eq', node.is_fork) &
Q('is_registration', 'eq', node.is_registration)
)
def resolve_templated_orphan(orphan):
candidate_parents = find_candidate_parents(orphan)
if candidate_parents.count() != 1:
logger.warn('Could not identify unique candidate parent for node {}'.format(orphan._id))
return
if candidate_parents[0].date_created != orphan.date_created:
logger.warn('Creation dates of candidate parent and orphan {} did not match'.format(orphan._id))
logger.info('Adding orphan to `nodes` list of candidate parent')
candidate_parents[0].nodes.append(orphan)
candidate_parents[0].save()
def find_templated_orphans():
return Node.find(
Q('template_node', 'ne', None) &
Q('category', 'ne', 'project') &
Q('__backrefs.parent.node.nodes.0', 'exists', False)
)
if __name__ == '__main__':
init_app()
orphans = find_templated_orphans()
for orphan in orphans:
resolve_templated_orphan(orphan)
class TestResolveTemplatedOrphans(OsfTestCase):
def setUp(self):
super(TestResolveTemplatedOrphans, self).setUp()
self.node = NodeFactory()
self.project = ProjectFactory(creator=self.node.creator)
self.project.nodes.append(self.node)
self.project.save()
self.templated_project = self.project.use_as_template(
Auth(self.node.creator)
)
self.templated_node = self.templated_project.nodes[0]
self.templated_project.nodes = []
self.templated_project.save()
def test_find(self):
orphans = find_templated_orphans()
assert_equal(orphans.count(), 1)
assert_equal(orphans[0], self.templated_node)
def test_resolve(self):
assert_not_in(self.templated_node, self.templated_project.nodes)
resolve_templated_orphan(self.node)
assert_in(self.node, self.project.nodes)
|
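Stripped of the ODM query syntax, the repair logic is: collect candidate parents, refuse to guess unless exactly one matches, then append the orphan to that parent's child list. A framework-free sketch of that control flow with an invented dict-based data model:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('orphans')

def resolve(orphan, all_nodes):
    # Same shape as resolve_templated_orphan: bail out unless the parent is unambiguous.
    candidates = [n for n in all_nodes if orphan['first_log'] in n['logs']]
    if len(candidates) != 1:
        logger.warning('could not identify unique parent for %s', orphan['id'])
        return
    candidates[0]['nodes'].append(orphan['id'])

parent = {'id': 'parent1', 'logs': {'log-a', 'log-b'}, 'nodes': []}
orphan = {'id': 'node1', 'first_log': 'log-a'}
resolve(orphan, [parent])
print(parent['nodes'])  # ['node1']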
|
df17f792faab74955f5e9573bf7dd9812b489bd3
|
hybridization_solver.py
|
hybridization_solver.py
|
from __future__ import absolute_import, print_function, division
from firedrake import *
qflag = False
degree = 1
mesh = UnitSquareMesh(8, 8, quadrilateral=qflag)
n = FacetNormal(mesh)
if qflag:
RT = FiniteElement("RTCF", quadrilateral, degree)
DG = FiniteElement("DQ", quadrilateral, degree - 1)
Te = FiniteElement("HDiv Trace", quadrilateral, degree - 1)
else:
RT = FiniteElement("RT", triangle, degree)
DG = FiniteElement("DG", triangle, degree - 1)
Te = FiniteElement("HDiv Trace", triangle, degree - 1)
Vd = FunctionSpace(mesh, BrokenElement(RT))
U = FunctionSpace(mesh, DG)
T = FunctionSpace(mesh, Te)
W = Vd * U
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
f = Function(U)
x, y = SpatialCoordinate(mesh)
f.interpolate((1 + 8*pi*pi)*sin(2*pi*x)*sin(2*pi*y))
a = (dot(sigma, tau) - div(tau)*u + v*div(sigma) + u*v)*dx
L = f*v*dx
bcs = DirichletBC(T, Constant(0.0), (1, 2, 3, 4))
gammar = TestFunction(T)
trace_form = dot(sigma, n)*gammar('+')*dS
K = Tensor(trace_form)
A = Tensor(a)
F = Tensor(L)
S = assemble(K * A.inv * K.T, bcs=bcs)
E = assemble(K * A.inv * F)
lambda_sol = Function(T)
solve(S, lambda_sol, E, solver_parameters={'pc_type': 'lu',
'ksp_type': 'preonly'})
|
Add a hybridization example using Slate manually
|
Add a hybridization example using Slate manually
|
Python
|
mit
|
thomasgibson/firedrake-hybridization
|
Add a hybridization example using Slate manually
|
from __future__ import absolute_import, print_function, division
from firedrake import *
qflag = False
degree = 1
mesh = UnitSquareMesh(8, 8, quadrilateral=qflag)
n = FacetNormal(mesh)
if qflag:
RT = FiniteElement("RTCF", quadrilateral, degree)
DG = FiniteElement("DQ", quadrilateral, degree - 1)
Te = FiniteElement("HDiv Trace", quadrilateral, degree - 1)
else:
RT = FiniteElement("RT", triangle, degree)
DG = FiniteElement("DG", triangle, degree - 1)
Te = FiniteElement("HDiv Trace", triangle, degree - 1)
Vd = FunctionSpace(mesh, BrokenElement(RT))
U = FunctionSpace(mesh, DG)
T = FunctionSpace(mesh, Te)
W = Vd * U
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
f = Function(U)
x, y = SpatialCoordinate(mesh)
f.interpolate((1 + 8*pi*pi)*sin(2*pi*x)*sin(2*pi*y))
a = (dot(sigma, tau) - div(tau)*u + v*div(sigma) + u*v)*dx
L = f*v*dx
bcs = DirichletBC(T, Constant(0.0), (1, 2, 3, 4))
gammar = TestFunction(T)
trace_form = dot(sigma, n)*gammar('+')*dS
K = Tensor(trace_form)
A = Tensor(a)
F = Tensor(L)
S = assemble(K * A.inv * K.T, bcs=bcs)
E = assemble(K * A.inv * F)
lambda_sol = Function(T)
solve(S, lambda_sol, E, solver_parameters={'pc_type': 'lu',
'ksp_type': 'preonly'})
|
<commit_before><commit_msg>Add a hybridization example using Slate manually<commit_after>
|
from __future__ import absolute_import, print_function, division
from firedrake import *
qflag = False
degree = 1
mesh = UnitSquareMesh(8, 8, quadrilateral=qflag)
n = FacetNormal(mesh)
if qflag:
RT = FiniteElement("RTCF", quadrilateral, degree)
DG = FiniteElement("DQ", quadrilateral, degree - 1)
Te = FiniteElement("HDiv Trace", quadrilateral, degree - 1)
else:
RT = FiniteElement("RT", triangle, degree)
DG = FiniteElement("DG", triangle, degree - 1)
Te = FiniteElement("HDiv Trace", triangle, degree - 1)
Vd = FunctionSpace(mesh, BrokenElement(RT))
U = FunctionSpace(mesh, DG)
T = FunctionSpace(mesh, Te)
W = Vd * U
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
f = Function(U)
x, y = SpatialCoordinate(mesh)
f.interpolate((1 + 8*pi*pi)*sin(2*pi*x)*sin(2*pi*y))
a = (dot(sigma, tau) - div(tau)*u + v*div(sigma) + u*v)*dx
L = f*v*dx
bcs = DirichletBC(T, Constant(0.0), (1, 2, 3, 4))
gammar = TestFunction(T)
trace_form = dot(sigma, n)*gammar('+')*dS
K = Tensor(trace_form)
A = Tensor(a)
F = Tensor(L)
S = assemble(K * A.inv * K.T, bcs=bcs)
E = assemble(K * A.inv * F)
lambda_sol = Function(T)
solve(S, lambda_sol, E, solver_parameters={'pc_type': 'lu',
'ksp_type': 'preonly'})
|
Add a hybridization example using Slate manually
from __future__ import absolute_import, print_function, division
from firedrake import *
qflag = False
degree = 1
mesh = UnitSquareMesh(8, 8, quadrilateral=qflag)
n = FacetNormal(mesh)
if qflag:
RT = FiniteElement("RTCF", quadrilateral, degree)
DG = FiniteElement("DQ", quadrilateral, degree - 1)
Te = FiniteElement("HDiv Trace", quadrilateral, degree - 1)
else:
RT = FiniteElement("RT", triangle, degree)
DG = FiniteElement("DG", triangle, degree - 1)
Te = FiniteElement("HDiv Trace", triangle, degree - 1)
Vd = FunctionSpace(mesh, BrokenElement(RT))
U = FunctionSpace(mesh, DG)
T = FunctionSpace(mesh, Te)
W = Vd * U
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
f = Function(U)
x, y = SpatialCoordinate(mesh)
f.interpolate((1 + 8*pi*pi)*sin(2*pi*x)*sin(2*pi*y))
a = (dot(sigma, tau) - div(tau)*u + v*div(sigma) + u*v)*dx
L = f*v*dx
bcs = DirichletBC(T, Constant(0.0), (1, 2, 3, 4))
gammar = TestFunction(T)
trace_form = dot(sigma, n)*gammar('+')*dS
K = Tensor(trace_form)
A = Tensor(a)
F = Tensor(L)
S = assemble(K * A.inv * K.T, bcs=bcs)
E = assemble(K * A.inv * F)
lambda_sol = Function(T)
solve(S, lambda_sol, E, solver_parameters={'pc_type': 'lu',
'ksp_type': 'preonly'})
|
<commit_before><commit_msg>Add a hybridization example using Slate manually<commit_after>from __future__ import absolute_import, print_function, division
from firedrake import *
qflag = False
degree = 1
mesh = UnitSquareMesh(8, 8, quadrilateral=qflag)
n = FacetNormal(mesh)
if qflag:
RT = FiniteElement("RTCF", quadrilateral, degree)
DG = FiniteElement("DQ", quadrilateral, degree - 1)
Te = FiniteElement("HDiv Trace", quadrilateral, degree - 1)
else:
RT = FiniteElement("RT", triangle, degree)
DG = FiniteElement("DG", triangle, degree - 1)
Te = FiniteElement("HDiv Trace", triangle, degree - 1)
Vd = FunctionSpace(mesh, BrokenElement(RT))
U = FunctionSpace(mesh, DG)
T = FunctionSpace(mesh, Te)
W = Vd * U
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
f = Function(U)
x, y = SpatialCoordinate(mesh)
f.interpolate((1 + 8*pi*pi)*sin(2*pi*x)*sin(2*pi*y))
a = (dot(sigma, tau) - div(tau)*u + v*div(sigma) + u*v)*dx
L = f*v*dx
bcs = DirichletBC(T, Constant(0.0), (1, 2, 3, 4))
gammar = TestFunction(T)
trace_form = dot(sigma, n)*gammar('+')*dS
K = Tensor(trace_form)
A = Tensor(a)
F = Tensor(L)
S = assemble(K * A.inv * K.T, bcs=bcs)
E = assemble(K * A.inv * F)
lambda_sol = Function(T)
solve(S, lambda_sol, E, solver_parameters={'pc_type': 'lu',
'ksp_type': 'preonly'})
|
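The Slate expressions condense the broken mixed system onto the trace space, solving S lambda = E with S = K A^-1 K^T and E = K A^-1 F. The same linear algebra on small dense numpy matrices, purely to illustrate the Schur-complement step and unrelated to the actual finite element operators:

import numpy as np

rng = np.random.default_rng(0)
A = np.diag(rng.uniform(1.0, 2.0, size=4))   # stand-in for the invertible local operator
K = rng.standard_normal((2, 4))              # stand-in for the trace coupling matrix
F = rng.standard_normal(4)                   # stand-in for the local right-hand side

A_inv = np.linalg.inv(A)
S = K @ A_inv @ K.T                          # condensed operator on the trace unknowns
E = K @ A_inv @ F
lambda_sol = np.linalg.solve(S, E)
print(lambda_sol)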
|
8a844b78c5ded93ce7a75585a6ad2b86d8b4cb13
|
pida_type_decoder.py
|
pida_type_decoder.py
|
from pida_types import IDA_TYPES
from pida_tlocal_type import IdaTLocalType
def decode_step(ida_type):
# TODO :
pass
def decode_hybrid_type(ida_type):
value = {'idt': None, 'value': None}
rbyte = ord(ida_type[0])
if not (ida_type[1] == '#' and rbyte in [4, 5]):
value = {'idt': IDA_TYPES['str'], 'value': value[1:rbyte]}
return rbyte, value
offset = 2
ext = False
if rbyte == 5:
offset += 1
ext = True
local_type = IdaTLocalType(ida_type=IDA_TYPES['local_type'])
local_type.decode(ida_type[offset:], ext=ext)
value = local_type.get_type()
return rbyte, value
|
Add recognize decoding typedef or local type
|
Add recognize decoding typedef or local type
|
Python
|
mit
|
goodwinxp/ATFGenerator,goodwinxp/ATFGenerator,goodwinxp/ATFGenerator
|
Add recognize decoding typedef or local type
|
from pida_types import IDA_TYPES
from pida_tlocal_type import IdaTLocalType
def decode_step(ida_type):
# TODO :
pass
def decode_hybrid_type(ida_type):
value = {'idt': None, 'value': None}
rbyte = ord(ida_type[0])
if not (ida_type[1] == '#' and rbyte in [4, 5]):
value = {'idt': IDA_TYPES['str'], 'value': value[1:rbyte]}
return rbyte, value
offset = 2
ext = False
if rbyte == 5:
offset += 1
ext = True
local_type = IdaTLocalType(ida_type=IDA_TYPES['local_type'])
local_type.decode(ida_type[offset:], ext=ext)
value = local_type.get_type()
return rbyte, value
|
<commit_before><commit_msg>Add recognize decoding typedef or local type<commit_after>
|
from pida_types import IDA_TYPES
from pida_tlocal_type import IdaTLocalType
def decode_step(ida_type):
# TODO :
pass
def decode_hybrid_type(ida_type):
value = {'idt': None, 'value': None}
rbyte = ord(ida_type[0])
if not (ida_type[1] == '#' and rbyte in [4, 5]):
value = {'idt': IDA_TYPES['str'], 'value': value[1:rbyte]}
return rbyte, value
offset = 2
ext = False
if rbyte == 5:
offset += 1
ext = True
local_type = IdaTLocalType(ida_type=IDA_TYPES['local_type'])
local_type.decode(ida_type[offset:], ext=ext)
value = local_type.get_type()
return rbyte, value
|
Add recognize decoding typedef or local type
from pida_types import IDA_TYPES
from pida_tlocal_type import IdaTLocalType
def decode_step(ida_type):
# TODO :
pass
def decode_hybrid_type(ida_type):
value = {'idt': None, 'value': None}
rbyte = ord(ida_type[0])
if not (ida_type[1] == '#' and rbyte in [4, 5]):
value = {'idt': IDA_TYPES['str'], 'value': value[1:rbyte]}
return rbyte, value
offset = 2
ext = False
if rbyte == 5:
offset += 1
ext = True
local_type = IdaTLocalType(ida_type=IDA_TYPES['local_type'])
local_type.decode(ida_type[offset:], ext=ext)
value = local_type.get_type()
return rbyte, value
|
<commit_before><commit_msg>Add recognize decoding typedef or local type<commit_after>from pida_types import IDA_TYPES
from pida_tlocal_type import IdaTLocalType
def decode_step(ida_type):
# TODO :
pass
def decode_hybrid_type(ida_type):
value = {'idt': None, 'value': None}
rbyte = ord(ida_type[0])
if not (ida_type[1] == '#' and rbyte in [4, 5]):
value = {'idt': IDA_TYPES['str'], 'value': value[1:rbyte]}
return rbyte, value
offset = 2
ext = False
if rbyte == 5:
offset += 1
ext = True
local_type = IdaTLocalType(ida_type=IDA_TYPES['local_type'])
local_type.decode(ida_type[offset:], ext=ext)
value = local_type.get_type()
return rbyte, value
|
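decode_hybrid_type branches on the leading length byte plus a '#' marker to tell an inline string apart from a typedef/local-type reference. A tiny standalone illustration of just that branch; the byte layout here is invented for demonstration, not taken from IDA.

def classify(blob):
    # blob[0] is a length byte; a value of 4 or 5 followed by '#' marks a type reference.
    rbyte = ord(blob[0])
    if blob[1] == '#' and rbyte in (4, 5):
        return 'local_type_reference'
    return 'inline_string'

print(classify('\x04#12'))       # local_type_reference
print(classify('\x06foobar'))    # inline_string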
|
0ad8d8665f064542346c3788cecaffdcb68f168a
|
plasmapy/utils/tests/test_exceptions.py
|
plasmapy/utils/tests/test_exceptions.py
|
import pytest
import warnings
from .. import (PlasmaPyError,
PhysicsError,
RelativityError,
AtomicError)
from .. import (PlasmaPyWarning,
PhysicsWarning,
RelativityWarning,
AtomicWarning)
plasmapy_exceptions = [
PlasmaPyError,
PhysicsError,
RelativityError,
AtomicError,
]
plasmapy_warnings = [
PlasmaPyWarning,
PhysicsWarning,
RelativityWarning,
AtomicWarning,
]
@pytest.mark.parametrize("exception", plasmapy_exceptions)
def test_exceptions(exception):
r"""Test that custom PlasmaPy exceptions can be raised with an
error message."""
with pytest.raises(exception):
raise exception("What an exceptionally exceptional exception!")
@pytest.mark.parametrize("warning", plasmapy_warnings)
def test_warnings(warning):
r"""Test that custom PlasmaPy warnings can be issued with a
warning message."""
with pytest.warns(warning):
warnings.warn("Coverage decreased (-0.00002%)", warning)
@pytest.mark.parametrize("exception", plasmapy_exceptions)
def test_PlasmaPyError_subclassing(exception):
r"""Test that each custom PlasmaPy exception can be caught
as a PlasmaPyError."""
with pytest.raises(PlasmaPyError):
raise exception("I'm sorry, Dave. I'm afraid I can't do that.")
@pytest.mark.parametrize("warning", plasmapy_warnings)
def test_PlasmaPyWarning_subclassing(warning):
r"""Test that custom PlasmaPy warnings can all be caught
as a PlasmaPyWarning."""
with pytest.warns(PlasmaPyWarning):
warnings.warn("Electrons are WEIRD.", warning)
|
Create tests for custom exceptions and warnings
|
Create tests for custom exceptions and warnings
|
Python
|
bsd-3-clause
|
StanczakDominik/PlasmaPy
|
Create tests for custom exceptions and warnings
|
import pytest
import warnings
from .. import (PlasmaPyError,
PhysicsError,
RelativityError,
AtomicError)
from .. import (PlasmaPyWarning,
PhysicsWarning,
RelativityWarning,
AtomicWarning)
plasmapy_exceptions = [
PlasmaPyError,
PhysicsError,
RelativityError,
AtomicError,
]
plasmapy_warnings = [
PlasmaPyWarning,
PhysicsWarning,
RelativityWarning,
AtomicWarning,
]
@pytest.mark.parametrize("exception", plasmapy_exceptions)
def test_exceptions(exception):
r"""Test that custom PlasmaPy exceptions can be raised with an
error message."""
with pytest.raises(exception):
raise exception("What an exceptionally exceptional exception!")
@pytest.mark.parametrize("warning", plasmapy_warnings)
def test_warnings(warning):
r"""Test that custom PlasmaPy warnings can be issued with a
warning message."""
with pytest.warns(warning):
warnings.warn("Coverage decreased (-0.00002%)", warning)
@pytest.mark.parametrize("exception", plasmapy_exceptions)
def test_PlasmaPyError_subclassing(exception):
r"""Test that each custom PlasmaPy exception can be caught
as a PlasmaPyError."""
with pytest.raises(PlasmaPyError):
raise exception("I'm sorry, Dave. I'm afraid I can't do that.")
@pytest.mark.parametrize("warning", plasmapy_warnings)
def test_PlasmaPyWarning_subclassing(warning):
r"""Test that custom PlasmaPy warnings can all be caught
as a PlasmaPyWarning."""
with pytest.warns(PlasmaPyWarning):
warnings.warn("Electrons are WEIRD.", warning)
|
<commit_before><commit_msg>Create tests for custom exceptions and warnings<commit_after>
|
import pytest
import warnings
from .. import (PlasmaPyError,
PhysicsError,
RelativityError,
AtomicError)
from .. import (PlasmaPyWarning,
PhysicsWarning,
RelativityWarning,
AtomicWarning)
plasmapy_exceptions = [
PlasmaPyError,
PhysicsError,
RelativityError,
AtomicError,
]
plasmapy_warnings = [
PlasmaPyWarning,
PhysicsWarning,
RelativityWarning,
AtomicWarning,
]
@pytest.mark.parametrize("exception", plasmapy_exceptions)
def test_exceptions(exception):
r"""Test that custom PlasmaPy exceptions can be raised with an
error message."""
with pytest.raises(exception):
raise exception("What an exceptionally exceptional exception!")
@pytest.mark.parametrize("warning", plasmapy_warnings)
def test_warnings(warning):
r"""Test that custom PlasmaPy warnings can be issued with a
warning message."""
with pytest.warns(warning):
warnings.warn("Coverage decreased (-0.00002%)", warning)
@pytest.mark.parametrize("exception", plasmapy_exceptions)
def test_PlasmaPyError_subclassing(exception):
r"""Test that each custom PlasmaPy exception can be caught
as a PlasmaPyError."""
with pytest.raises(PlasmaPyError):
raise exception("I'm sorry, Dave. I'm afraid I can't do that.")
@pytest.mark.parametrize("warning", plasmapy_warnings)
def test_PlasmaPyWarning_subclassing(warning):
r"""Test that custom PlasmaPy warnings can all be caught
as a PlasmaPyWarning."""
with pytest.warns(PlasmaPyWarning):
warnings.warn("Electrons are WEIRD.", warning)
|
Create tests for custom exceptions and warnings
import pytest
import warnings
from .. import (PlasmaPyError,
PhysicsError,
RelativityError,
AtomicError)
from .. import (PlasmaPyWarning,
PhysicsWarning,
RelativityWarning,
AtomicWarning)
plasmapy_exceptions = [
PlasmaPyError,
PhysicsError,
RelativityError,
AtomicError,
]
plasmapy_warnings = [
PlasmaPyWarning,
PhysicsWarning,
RelativityWarning,
AtomicWarning,
]
@pytest.mark.parametrize("exception", plasmapy_exceptions)
def test_exceptions(exception):
r"""Test that custom PlasmaPy exceptions can be raised with an
error message."""
with pytest.raises(exception):
raise exception("What an exceptionally exceptional exception!")
@pytest.mark.parametrize("warning", plasmapy_warnings)
def test_warnings(warning):
r"""Test that custom PlasmaPy warnings can be issued with a
warning message."""
with pytest.warns(warning):
warnings.warn("Coverage decreased (-0.00002%)", warning)
@pytest.mark.parametrize("exception", plasmapy_exceptions)
def test_PlasmaPyError_subclassing(exception):
r"""Test that each custom PlasmaPy exception can be caught
as a PlasmaPyError."""
with pytest.raises(PlasmaPyError):
raise exception("I'm sorry, Dave. I'm afraid I can't do that.")
@pytest.mark.parametrize("warning", plasmapy_warnings)
def test_PlasmaPyWarning_subclassing(warning):
r"""Test that custom PlasmaPy warnings can all be caught
as a PlasmaPyWarning."""
with pytest.warns(PlasmaPyWarning):
warnings.warn("Electrons are WEIRD.", warning)
|
<commit_before><commit_msg>Create tests for custom exceptions and warnings<commit_after>import pytest
import warnings
from .. import (PlasmaPyError,
PhysicsError,
RelativityError,
AtomicError)
from .. import (PlasmaPyWarning,
PhysicsWarning,
RelativityWarning,
AtomicWarning)
plasmapy_exceptions = [
PlasmaPyError,
PhysicsError,
RelativityError,
AtomicError,
]
plasmapy_warnings = [
PlasmaPyWarning,
PhysicsWarning,
RelativityWarning,
AtomicWarning,
]
@pytest.mark.parametrize("exception", plasmapy_exceptions)
def test_exceptions(exception):
r"""Test that custom PlasmaPy exceptions can be raised with an
error message."""
with pytest.raises(exception):
raise exception("What an exceptionally exceptional exception!")
@pytest.mark.parametrize("warning", plasmapy_warnings)
def test_warnings(warning):
r"""Test that custom PlasmaPy warnings can be issued with a
warning message."""
with pytest.warns(warning):
warnings.warn("Coverage decreased (-0.00002%)", warning)
@pytest.mark.parametrize("exception", plasmapy_exceptions)
def test_PlasmaPyError_subclassing(exception):
r"""Test that each custom PlasmaPy exception can be caught
as a PlasmaPyError."""
with pytest.raises(PlasmaPyError):
raise exception("I'm sorry, Dave. I'm afraid I can't do that.")
@pytest.mark.parametrize("warning", plasmapy_warnings)
def test_PlasmaPyWarning_subclassing(warning):
r"""Test that custom PlasmaPy warnings can all be caught
as a PlasmaPyWarning."""
with pytest.warns(PlasmaPyWarning):
warnings.warn("Electrons are WEIRD.", warning)
|
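The parametrize-over-classes pattern above works for any exception hierarchy. A self-contained pytest file using only built-in exceptions as stand-ins (save as test_sketch.py and run pytest):

import pytest

stand_in_errors = [ValueError, KeyError, IndexError]  # substitutes for a project hierarchy

@pytest.mark.parametrize("exc", stand_in_errors)
def test_raises_with_message(exc):
    with pytest.raises(exc):
        raise exc("boom")

@pytest.mark.parametrize("exc", stand_in_errors)
def test_caught_as_common_base(exc):
    # Exception plays the role PlasmaPyError plays in the tests above.
    with pytest.raises(Exception):
        raise exc("boom")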
|
6cd12f2aaa6170daef88a913ee78b725b6450d61
|
proselint/checks/garner/not_guilty.py
|
proselint/checks/garner/not_guilty.py
|
# -*- coding: utf-8 -*-
"""Not guilty beyond a reasonable doubt.
---
layout: post
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY
title: Not guilty beyond a reasonable doubt.
date: 2016-03-09 15:50:31
categories: writing
---
This phrasing is ambiguous. The standard by which a jury decides criminal
charges is this: a defendant is guilty only if the evidence shows, beyond a
reasonable doubt, that he or she committed the crime. Otherwise, the defendant
is not guilty. Thus, we say that a defendant was not found "guilty beyond a
reasonable doubt."
If somebody is found not guilty, say "not guilty." Omit the standard
("beyond a reasonable doubt") to prevent a miscue.
Not guilty beyond a reasonable doubt
"""
from proselint.tools import existence_check, memoize
@memoize
def check(text):
"""Check the text."""
err = "misc.not_guilty"
msg = u"'not guilty beyond a reasonable doubt' is an ambiguous phrasing."
regex = r"not guilty beyond (a |any )?reasonable doubt"
return existence_check(text, [regex], err, msg)
|
Add check for 'not guilty beyond a reasonable doubt'
|
Add check for 'not guilty beyond a reasonable doubt'
Closes issue #242
|
Python
|
bsd-3-clause
|
amperser/proselint,amperser/proselint,amperser/proselint,amperser/proselint,amperser/proselint
|
Add check for 'not guilty beyond a reasonable doubt'
Closes issue #242
|
# -*- coding: utf-8 -*-
"""Not guilty beyond a reasonable doubt.
---
layout: post
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY
title: Not guilty beyond a reasonable doubt.
date: 2016-03-09 15:50:31
categories: writing
---
This phrasing is ambiguous. The standard by which a jury decides criminal
charges is this: a defendant is guilty only if the evidence shows, beyond a
reasonable doubt, that he or she committed the crime. Otherwise, the defendant
is not guilty. Thus, we say that a defendant was not found "guilty beyond a
reasonable doubt."
If somebody is found not guilty, say "not guilty." Omit the standard
("beyond a reasonable doubt") to prevent a miscue.
Not guilty beyond a reasonable doubt
"""
from proselint.tools import existence_check, memoize
@memoize
def check(text):
"""Check the text."""
err = "misc.not_guilty"
msg = u"'not guilty beyond a reasonable doubt' is an ambiguous phrasing."
regex = r"not guilty beyond (a |any )?reasonable doubt"
return existence_check(text, [regex], err, msg)
|
<commit_before><commit_msg>Add check for 'not guilty beyond a reasonable doubt'
Closes issue #242<commit_after>
|
# -*- coding: utf-8 -*-
"""Not guilty beyond a reasonable doubt.
---
layout: post
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY
title: Not guilty beyond a reasonable doubt.
date: 2016-03-09 15:50:31
categories: writing
---
This phrasing is ambiguous. The standard by which a jury decides criminal
charges is this: a defendant is guilty only if the evidence shows, beyond a
reasonable doubt, that he or she committed the crime. Otherwise, the defendant
is not guilty. Thus, we say that a defendant was not found "guilty beyond a
reasonable doubt."
If somebody is found not guilty, say "not guilty." Omit the standard
("beyond a reasonable doubt") to prevent a miscue.
Not guilty beyond a reasonable doubt
"""
from proselint.tools import existence_check, memoize
@memoize
def check(text):
"""Check the text."""
err = "misc.not_guilty"
msg = u"'not guilty beyond a reasonable doubt' is an ambiguous phrasing."
regex = r"not guilty beyond (a |any )?reasonable doubt"
return existence_check(text, [regex], err, msg)
|
Add check for 'not guilty beyond a reasonable doubt'
Closes issue #242# -*- coding: utf-8 -*-
"""Not guilty beyond a reasonable doubt.
---
layout: post
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY
title: Not guilty beyond a reasonable doubt.
date: 2016-03-09 15:50:31
categories: writing
---
This phrasing is ambiguous. The standard by which a jury decides criminal
charges is this: a defendant is guilty only if the evidence shows, beyond a
reasonable doubt, that he or she committed the crime. Otherwise, the defendant
is not guilty. Thus, we say that a defendant was not found "guilty beyond a
reasonable doubt."
If somebody is found not guilty, say "not guilty." Omit the standard
("beyond a reasonable doubt") to prevent a miscue.
Not guilty beyond a reasonable doubt
"""
from proselint.tools import existence_check, memoize
@memoize
def check(text):
"""Check the text."""
err = "misc.not_guilty"
msg = u"'not guilty beyond a reasonable doubt' is an ambiguous phrasing."
regex = r"not guilty beyond (a |any )?reasonable doubt"
return existence_check(text, [regex], err, msg)
|
<commit_before><commit_msg>Add check for 'not guilty beyond a reasonable doubt'
Closes issue #242<commit_after># -*- coding: utf-8 -*-
"""Not guilty beyond a reasonable doubt.
---
layout: post
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY
title: Not guilty beyond a reasonable doubt.
date: 2016-03-09 15:50:31
categories: writing
---
This phrasing is ambiguous. The standard by which a jury decides criminal
charges is this: a defendant is guilty only if the evidence shows, beyond a
reasonable doubt, that he or she committed the crime. Otherwise, the defendant
is not guilty. Thus, we say that a defendant was not found "guilty beyond a
reasonable doubt."
If somebody is found not guilty, say "not guilty." Omit the standard
("beyond a reasonable doubt") to prevent a miscue.
Not guilty beyond a reasonable doubt
"""
from proselint.tools import existence_check, memoize
@memoize
def check(text):
"""Check the text."""
err = "misc.not_guilty"
msg = u"'not guilty beyond a reasonable doubt' is an ambiguous phrasing."
regex = r"not guilty beyond (a |any )?reasonable doubt"
return existence_check(text, [regex], err, msg)
|
|
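The check above is driven entirely by its regular expression, so its behaviour can be previewed with the standard re module before wiring it into proselint. A small sketch using only the pattern from the record:

import re

regex = r"not guilty beyond (a |any )?reasonable doubt"
assert re.search(regex, "She was found not guilty beyond a reasonable doubt.")
assert re.search(regex, "found not guilty beyond any reasonable doubt")
assert re.search(regex, "guilty beyond a reasonable doubt") is None

The full check() wraps this same pattern in existence_check, which, as used here, attaches the error code and message to each match.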
2c9760da48caaf9656c8b1e3f81e70671b7e7c5e
|
postgres/audit/migrations/0003_auditlog_app_session.py
|
postgres/audit/migrations/0003_auditlog_app_session.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('audit', '0002_auditlog'),
]
operations = [
migrations.AddField(
model_name='auditlog',
name='app_session',
field=models.TextField(null=True),
preserve_default=True,
),
]
|
Add missing migration for audit app.
|
Add missing migration for audit app.
|
Python
|
bsd-3-clause
|
wlanslovenija/django-postgres
|
Add missing migration for audit app.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('audit', '0002_auditlog'),
]
operations = [
migrations.AddField(
model_name='auditlog',
name='app_session',
field=models.TextField(null=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add missing migration for audit app.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('audit', '0002_auditlog'),
]
operations = [
migrations.AddField(
model_name='auditlog',
name='app_session',
field=models.TextField(null=True),
preserve_default=True,
),
]
|
Add missing migration for audit app.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('audit', '0002_auditlog'),
]
operations = [
migrations.AddField(
model_name='auditlog',
name='app_session',
field=models.TextField(null=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add missing migration for audit app.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('audit', '0002_auditlog'),
]
operations = [
migrations.AddField(
model_name='auditlog',
name='app_session',
field=models.TextField(null=True),
preserve_default=True,
),
]
|
|
a466a89cd18252c6d90fd3b590148ca3268ff637
|
karabo_data/tests/test_lpd_geometry.py
|
karabo_data/tests/test_lpd_geometry.py
|
from matplotlib.figure import Figure
import numpy as np
from karabo_data.geometry2 import LPD_1MGeometry
def test_inspect():
geom = LPD_1MGeometry.from_quad_positions([
(11.4, 299),
(-11.5, 8),
(254.5, -16),
(278.5, 275)
])
# Smoketest
fig = geom.inspect()
assert isinstance(fig, Figure)
def test_snap_assemble_data():
geom = LPD_1MGeometry.from_quad_positions([
(11.4, 299),
(-11.5, 8),
(254.5, -16),
(278.5, 275)
])
stacked_data = np.zeros((16, 256, 256))
img, centre = geom.position_modules_fast(stacked_data)
assert img.shape == (1202, 1104)
assert tuple(centre) == (604, 547)
assert np.isnan(img[0, 0])
assert img[50, 50] == 0
|
Add a couple of simple tests for LPD geometry
|
Add a couple of simple tests for LPD geometry
|
Python
|
bsd-3-clause
|
European-XFEL/h5tools-py
|
Add a couple of simple tests for LPD geometry
|
from matplotlib.figure import Figure
import numpy as np
from karabo_data.geometry2 import LPD_1MGeometry
def test_inspect():
geom = LPD_1MGeometry.from_quad_positions([
(11.4, 299),
(-11.5, 8),
(254.5, -16),
(278.5, 275)
])
# Smoketest
fig = geom.inspect()
assert isinstance(fig, Figure)
def test_snap_assemble_data():
geom = LPD_1MGeometry.from_quad_positions([
(11.4, 299),
(-11.5, 8),
(254.5, -16),
(278.5, 275)
])
stacked_data = np.zeros((16, 256, 256))
img, centre = geom.position_modules_fast(stacked_data)
assert img.shape == (1202, 1104)
assert tuple(centre) == (604, 547)
assert np.isnan(img[0, 0])
assert img[50, 50] == 0
|
<commit_before><commit_msg>Add a couple of simple tests for LPD geometry<commit_after>
|
from matplotlib.figure import Figure
import numpy as np
from karabo_data.geometry2 import LPD_1MGeometry
def test_inspect():
geom = LPD_1MGeometry.from_quad_positions([
(11.4, 299),
(-11.5, 8),
(254.5, -16),
(278.5, 275)
])
# Smoketest
fig = geom.inspect()
assert isinstance(fig, Figure)
def test_snap_assemble_data():
geom = LPD_1MGeometry.from_quad_positions([
(11.4, 299),
(-11.5, 8),
(254.5, -16),
(278.5, 275)
])
stacked_data = np.zeros((16, 256, 256))
img, centre = geom.position_modules_fast(stacked_data)
assert img.shape == (1202, 1104)
assert tuple(centre) == (604, 547)
assert np.isnan(img[0, 0])
assert img[50, 50] == 0
|
Add a couple of simple tests for LPD geometryfrom matplotlib.figure import Figure
import numpy as np
from karabo_data.geometry2 import LPD_1MGeometry
def test_inspect():
geom = LPD_1MGeometry.from_quad_positions([
(11.4, 299),
(-11.5, 8),
(254.5, -16),
(278.5, 275)
])
# Smoketest
fig = geom.inspect()
assert isinstance(fig, Figure)
def test_snap_assemble_data():
geom = LPD_1MGeometry.from_quad_positions([
(11.4, 299),
(-11.5, 8),
(254.5, -16),
(278.5, 275)
])
stacked_data = np.zeros((16, 256, 256))
img, centre = geom.position_modules_fast(stacked_data)
assert img.shape == (1202, 1104)
assert tuple(centre) == (604, 547)
assert np.isnan(img[0, 0])
assert img[50, 50] == 0
|
<commit_before><commit_msg>Add a couple of simple tests for LPD geometry<commit_after>from matplotlib.figure import Figure
import numpy as np
from karabo_data.geometry2 import LPD_1MGeometry
def test_inspect():
geom = LPD_1MGeometry.from_quad_positions([
(11.4, 299),
(-11.5, 8),
(254.5, -16),
(278.5, 275)
])
# Smoketest
fig = geom.inspect()
assert isinstance(fig, Figure)
def test_snap_assemble_data():
geom = LPD_1MGeometry.from_quad_positions([
(11.4, 299),
(-11.5, 8),
(254.5, -16),
(278.5, 275)
])
stacked_data = np.zeros((16, 256, 256))
img, centre = geom.position_modules_fast(stacked_data)
assert img.shape == (1202, 1104)
assert tuple(centre) == (604, 547)
assert np.isnan(img[0, 0])
assert img[50, 50] == 0
|
|
80f35ad0d3a6a1f04eb0339bb1088ebe6eb27af5
|
mongomock/results.py
|
mongomock/results.py
|
try:
from pymongo.results import InsertOneResult
from pymongo.results import InsertManyResult
from pymongo.results import UpdateResult
from pymongo.results import DeleteResult
except ImportError:
class _WriteResult(object):
def __init__(self, acknowledged=True):
self.__acknowledged = acknowledged
@property
def acknowledged(self):
return self.__acknowledged
class InsertOneResult(_WriteResult):
__slots__ = ('__inserted_id', '__acknowledged')
def __init__(self, inserted_id, acknowledged=True):
self.__inserted_id = inserted_id
super(InsertOneResult, self).__init__(acknowledged)
@property
def inserted_id(self):
return self.__inserted_id
class InsertManyResult(_WriteResult):
__slots__ = ('__inserted_ids', '__acknowledged')
def __init__(self, inserted_ids, acknowledged=True):
self.__inserted_ids = inserted_ids
super(InsertManyResult, self).__init__(acknowledged)
@property
def inserted_ids(self):
return self.__inserted_ids
class UpdateResult(_WriteResult):
__slots__ = ('__raw_result', '__acknowledged')
def __init__(self, raw_result, acknowledged=True):
self.__raw_result = raw_result
super(UpdateResult, self).__init__(acknowledged)
@property
def raw_result(self):
return self.__raw_result
@property
def matched_count(self):
if self.upserted_id is not None:
return 0
return self.__raw_result.get('n', 0)
@property
def modified_count(self):
return self.__raw_result.get('nModified')
@property
def upserted_id(self):
return self.__raw_result.get('upserted')
class DeleteResult(_WriteResult):
__slots__ = ('__raw_result', '__acknowledged')
def __init__(self, raw_result, acknowledged=True):
self.__raw_result = raw_result
super(DeleteResult, self).__init__(acknowledged)
@property
def raw_result(self):
return self.__raw_result
@property
def deleted_count(self):
return self.__raw_result.get('n', 0)
|
Add result classes for update/insert/delete ops
|
Add result classes for update/insert/delete ops
|
Python
|
bsd-3-clause
|
vmalloc/mongomock,marcinbarczynski/mongomock,mdomke/mongomock,drorasaf/mongomock,magaman384/mongomock,StarfishStorage/mongomock,julianhille/mongomock
|
Add result classes for update/insert/delete ops
|
try:
from pymongo.results import InsertOneResult
from pymongo.results import InsertManyResult
from pymongo.results import UpdateResult
from pymongo.results import DeleteResult
except ImportError:
class _WriteResult(object):
def __init__(self, acknowledged=True):
self.__acknowledged = acknowledged
@property
def acknowledged(self):
return self.__acknowledged
class InsertOneResult(_WriteResult):
__slots__ = ('__inserted_id', '__acknowledged')
def __init__(self, inserted_id, acknowledged=True):
self.__inserted_id = inserted_id
super(InsertOneResult, self).__init__(acknowledged)
@property
def inserted_id(self):
return self.__inserted_id
class InsertManyResult(_WriteResult):
__slots__ = ('__inserted_ids', '__acknowledged')
def __init__(self, inserted_ids, acknowledged=True):
self.__inserted_ids = inserted_ids
super(InsertManyResult, self).__init__(acknowledged)
@property
def inserted_ids(self):
return self.__inserted_ids
class UpdateResult(_WriteResult):
__slots__ = ('__raw_result', '__acknowledged')
def __init__(self, raw_result, acknowledged=True):
self.__raw_result = raw_result
super(UpdateResult, self).__init__(acknowledged)
@property
def raw_result(self):
return self.__raw_result
@property
def matched_count(self):
if self.upserted_id is not None:
return 0
return self.__raw_result.get('n', 0)
@property
def modified_count(self):
return self.__raw_result.get('nModified')
@property
def upserted_id(self):
return self.__raw_result.get('upserted')
class DeleteResult(_WriteResult):
__slots__ = ('__raw_result', '__acknowledged')
def __init__(self, raw_result, acknowledged=True):
self.__raw_result = raw_result
super(DeleteResult, self).__init__(acknowledged)
@property
def raw_result(self):
return self.__raw_result
@property
def deleted_count(self):
return self.__raw_result.get('n', 0)
|
<commit_before><commit_msg>Add result classes for update/insert/delete ops<commit_after>
|
try:
from pymongo.results import InsertOneResult
from pymongo.results import InsertManyResult
from pymongo.results import UpdateResult
from pymongo.results import DeleteResult
except ImportError:
class _WriteResult(object):
def __init__(self, acknowledged=True):
self.__acknowledged = acknowledged
@property
def acknowledged(self):
return self.__acknowledged
class InsertOneResult(_WriteResult):
__slots__ = ('__inserted_id', '__acknowledged')
def __init__(self, inserted_id, acknowledged=True):
self.__inserted_id = inserted_id
super(InsertOneResult, self).__init__(acknowledged)
@property
def inserted_id(self):
return self.__inserted_id
class InsertManyResult(_WriteResult):
__slots__ = ('__inserted_ids', '__acknowledged')
def __init__(self, inserted_ids, acknowledged=True):
self.__inserted_ids = inserted_ids
super(InsertManyResult, self).__init__(acknowledged)
@property
def inserted_ids(self):
return self.__inserted_ids
class UpdateResult(_WriteResult):
__slots__ = ('__raw_result', '__acknowledged')
def __init__(self, raw_result, acknowledged=True):
self.__raw_result = raw_result
super(UpdateResult, self).__init__(acknowledged)
@property
def raw_result(self):
return self.__raw_result
@property
def matched_count(self):
if self.upserted_id is not None:
return 0
return self.__raw_result.get('n', 0)
@property
def modified_count(self):
return self.__raw_result.get('nModified')
@property
def upserted_id(self):
return self.__raw_result.get('upserted')
class DeleteResult(_WriteResult):
__slots__ = ('__raw_result', '__acknowledged')
def __init__(self, raw_result, acknowledged=True):
self.__raw_result = raw_result
super(DeleteResult, self).__init__(acknowledged)
@property
def raw_result(self):
return self.__raw_result
@property
def deleted_count(self):
return self.__raw_result.get('n', 0)
|
Add result classes for update/insert/delete opstry:
from pymongo.results import InsertOneResult
from pymongo.results import InsertManyResult
from pymongo.results import UpdateResult
from pymongo.results import DeleteResult
except ImportError:
class _WriteResult(object):
def __init__(self, acknowledged=True):
self.__acknowledged = acknowledged
@property
def acknowledged(self):
return self.__acknowledged
class InsertOneResult(_WriteResult):
__slots__ = ('__inserted_id', '__acknowledged')
def __init__(self, inserted_id, acknowledged=True):
self.__inserted_id = inserted_id
super(InsertOneResult, self).__init__(acknowledged)
@property
def inserted_id(self):
return self.__inserted_id
class InsertManyResult(_WriteResult):
__slots__ = ('__inserted_ids', '__acknowledged')
def __init__(self, inserted_ids, acknowledged=True):
self.__inserted_ids = inserted_ids
super(InsertManyResult, self).__init__(acknowledged)
@property
def inserted_ids(self):
return self.__inserted_ids
class UpdateResult(_WriteResult):
__slots__ = ('__raw_result', '__acknowledged')
def __init__(self, raw_result, acknowledged=True):
self.__raw_result = raw_result
super(UpdateResult, self).__init__(acknowledged)
@property
def raw_result(self):
return self.__raw_result
@property
def matched_count(self):
if self.upserted_id is not None:
return 0
return self.__raw_result.get('n', 0)
@property
def modified_count(self):
return self.__raw_result.get('nModified')
@property
def upserted_id(self):
return self.__raw_result.get('upserted')
class DeleteResult(_WriteResult):
__slots__ = ('__raw_result', '__acknowledged')
def __init__(self, raw_result, acknowledged=True):
self.__raw_result = raw_result
super(DeleteResult, self).__init__(acknowledged)
@property
def raw_result(self):
return self.__raw_result
@property
def deleted_count(self):
return self.__raw_result.get('n', 0)
|
<commit_before><commit_msg>Add result classes for update/insert/delete ops<commit_after>try:
from pymongo.results import InsertOneResult
from pymongo.results import InsertManyResult
from pymongo.results import UpdateResult
from pymongo.results import DeleteResult
except ImportError:
class _WriteResult(object):
def __init__(self, acknowledged=True):
self.__acknowledged = acknowledged
@property
def acknowledged(self):
return self.__acknowledged
class InsertOneResult(_WriteResult):
__slots__ = ('__inserted_id', '__acknowledged')
def __init__(self, inserted_id, acknowledged=True):
self.__inserted_id = inserted_id
super(InsertOneResult, self).__init__(acknowledged)
@property
def inserted_id(self):
return self.__inserted_id
class InsertManyResult(_WriteResult):
__slots__ = ('__inserted_ids', '__acknowledged')
def __init__(self, inserted_ids, acknowledged=True):
self.__inserted_ids = inserted_ids
super(InsertManyResult, self).__init__(acknowledged)
@property
def inserted_ids(self):
return self.__inserted_ids
class UpdateResult(_WriteResult):
__slots__ = ('__raw_result', '__acknowledged')
def __init__(self, raw_result, acknowledged=True):
self.__raw_result = raw_result
super(UpdateResult, self).__init__(acknowledged)
@property
def raw_result(self):
return self.__raw_result
@property
def matched_count(self):
if self.upserted_id is not None:
return 0
return self.__raw_result.get('n', 0)
@property
def modified_count(self):
return self.__raw_result.get('nModified')
@property
def upserted_id(self):
return self.__raw_result.get('upserted')
class DeleteResult(_WriteResult):
__slots__ = ('__raw_result', '__acknowledged')
def __init__(self, raw_result, acknowledged=True):
self.__raw_result = raw_result
super(DeleteResult, self).__init__(acknowledged)
@property
def raw_result(self):
return self.__raw_result
@property
def deleted_count(self):
return self.__raw_result.get('n', 0)
|
|
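These fallback classes mirror the pymongo result API, so calling code can read counts the same way whether or not pymongo is installed. A minimal usage sketch, assuming the module is importable as mongomock.results and using the raw-result keys ('n', 'nModified', 'upserted') from the code above:

from mongomock.results import UpdateResult, DeleteResult

update = UpdateResult({'n': 1, 'nModified': 1}, acknowledged=True)
print(update.acknowledged)     # True
print(update.matched_count)    # 1
print(update.modified_count)   # 1
print(update.upserted_id)      # None

delete = DeleteResult({'n': 2})
print(delete.deleted_count)    # 2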
9d44e4eb4c8d2c2f10152894f7c53d9feaae528c
|
api_bouncer/middlewares/ip_restriction.py
|
api_bouncer/middlewares/ip_restriction.py
|
import ipaddress
from django.http import JsonResponse
from ..models import Plugin
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
class IpRestrictionMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
host = request.META.get('HTTP_HOST')
consumer_id = request.META.get('HTTP_CONSUMER_ID')
plugin_conf = Plugin.objects.filter(
api__hosts__contains=[host],
name='ip-restriction'
).first()
if (
plugin_conf and (
not plugin_conf.config.get('consumer_id') or
plugin_conf.config.get('consumer_id') == consumer_id
)
):
config = plugin_conf.config
whitelist = config['whitelist']
blacklist = config['blacklist']
client_ip = get_client_ip(request)
if not self.check_ip_address(client_ip, blacklist, whitelist):
return JsonResponse({'errors': 'Forbidden'}, status=403)
response = self.get_response(request)
return response
def check_ip_address(self, client_ip, blacklist, whitelist):
client_ip = ipaddress.ip_address(client_ip)
for ip in blacklist:
if client_ip in ipaddress.ip_network(ip):
return False
if (
whitelist and
not any([
client_ip in
ipaddress.ip_network(ip) for ip in whitelist
])
):
return False
return True
|
Add ip-restriction plugin to declare ip whitelists/blacklists and restrict api access
|
Add ip-restriction plugin to declare ip whitelists/blacklists
and restrict api access
|
Python
|
apache-2.0
|
menecio/django-api-bouncer
|
Add ip-restriction plugin to declare ip whitelists/blacklists
and restrict api access
|
import ipaddress
from django.http import JsonResponse
from ..models import Plugin
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
class IpRestrictionMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
host = request.META.get('HTTP_HOST')
consumer_id = request.META.get('HTTP_CONSUMER_ID')
plugin_conf = Plugin.objects.filter(
api__hosts__contains=[host],
name='ip-restriction'
).first()
if (
plugin_conf and (
not plugin_conf.config.get('consumer_id') or
plugin_conf.config.get('consumer_id') == consumer_id
)
):
config = plugin_conf.config
whitelist = config['whitelist']
blacklist = config['blacklist']
client_ip = get_client_ip(request)
if not self.check_ip_address(client_ip, blacklist, whitelist):
return JsonResponse({'errors': 'Forbidden'}, status=403)
response = self.get_response(request)
return response
def check_ip_address(self, client_ip, blacklist, whitelist):
client_ip = ipaddress.ip_address(client_ip)
for ip in blacklist:
if client_ip in ipaddress.ip_network(ip):
return False
if (
whitelist and
not any([
client_ip in
ipaddress.ip_network(ip) for ip in whitelist
])
):
return False
return True
|
<commit_before><commit_msg>Add ip-restriction plugin to declare ip whitelists/blacklists
and restrict api access<commit_after>
|
import ipaddress
from django.http import JsonResponse
from ..models import Plugin
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
class IpRestrictionMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
host = request.META.get('HTTP_HOST')
consumer_id = request.META.get('HTTP_CONSUMER_ID')
plugin_conf = Plugin.objects.filter(
api__hosts__contains=[host],
name='ip-restriction'
).first()
if (
plugin_conf and (
not plugin_conf.config.get('consumer_id') or
plugin_conf.config.get('consumer_id') == consumer_id
)
):
config = plugin_conf.config
whitelist = config['whitelist']
blacklist = config['blacklist']
client_ip = get_client_ip(request)
if not self.check_ip_address(client_ip, blacklist, whitelist):
return JsonResponse({'errors': 'Forbidden'}, status=403)
response = self.get_response(request)
return response
def check_ip_address(self, client_ip, blacklist, whitelist):
client_ip = ipaddress.ip_address(client_ip)
for ip in blacklist:
if client_ip in ipaddress.ip_network(ip):
return False
if (
whitelist and
not any([
client_ip in
ipaddress.ip_network(ip) for ip in whitelist
])
):
return False
return True
|
Add ip-restriction plugin to declare ip whitelists/blacklists
and restrict api accessimport ipaddress
from django.http import JsonResponse
from ..models import Plugin
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
class IpRestrictionMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
host = request.META.get('HTTP_HOST')
consumer_id = request.META.get('HTTP_CONSUMER_ID')
plugin_conf = Plugin.objects.filter(
api__hosts__contains=[host],
name='ip-restriction'
).first()
if (
plugin_conf and (
not plugin_conf.config.get('consumer_id') or
plugin_conf.config.get('consumer_id') == consumer_id
)
):
config = plugin_conf.config
whitelist = config['whitelist']
blacklist = config['blacklist']
client_ip = get_client_ip(request)
if not self.check_ip_address(client_ip, blacklist, whitelist):
return JsonResponse({'errors': 'Forbidden'}, status=403)
response = self.get_response(request)
return response
def check_ip_address(self, client_ip, blacklist, whitelist):
client_ip = ipaddress.ip_address(client_ip)
for ip in blacklist:
if client_ip in ipaddress.ip_network(ip):
return False
if (
whitelist and
not any([
client_ip in
ipaddress.ip_network(ip) for ip in whitelist
])
):
return False
return True
|
<commit_before><commit_msg>Add ip-restriction plugin to declare ip whitelists/blacklists
and restrict api access<commit_after>import ipaddress
from django.http import JsonResponse
from ..models import Plugin
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
class IpRestrictionMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
host = request.META.get('HTTP_HOST')
consumer_id = request.META.get('HTTP_CONSUMER_ID')
plugin_conf = Plugin.objects.filter(
api__hosts__contains=[host],
name='ip-restriction'
).first()
if (
plugin_conf and (
not plugin_conf.config.get('consumer_id') or
plugin_conf.config.get('consumer_id') == consumer_id
)
):
config = plugin_conf.config
whitelist = config['whitelist']
blacklist = config['blacklist']
client_ip = get_client_ip(request)
if not self.check_ip_address(client_ip, blacklist, whitelist):
return JsonResponse({'errors': 'Forbidden'}, status=403)
response = self.get_response(request)
return response
def check_ip_address(self, client_ip, blacklist, whitelist):
client_ip = ipaddress.ip_address(client_ip)
for ip in blacklist:
if client_ip in ipaddress.ip_network(ip):
return False
if (
whitelist and
not any([
client_ip in
ipaddress.ip_network(ip) for ip in whitelist
])
):
return False
return True
|
|
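The access decision is concentrated in check_ip_address: any blacklist hit denies the request, and a non-empty whitelist denies anything not on it. The same membership checks can be exercised standalone with the stdlib ipaddress module; this sketch mirrors the middleware's logic (unicode literals keep it working on Python 2 as well):

import ipaddress

def check_ip_address(client_ip, blacklist, whitelist):
    client_ip = ipaddress.ip_address(client_ip)
    # Blacklist wins: any matching network denies access outright
    if any(client_ip in ipaddress.ip_network(ip) for ip in blacklist):
        return False
    # A non-empty whitelist must contain the client address
    if whitelist and not any(client_ip in ipaddress.ip_network(ip) for ip in whitelist):
        return False
    return True

print(check_ip_address(u'10.0.0.5', [u'10.0.0.0/8'], []))          # False, blacklisted subnet
print(check_ip_address(u'192.168.1.7', [], [u'192.168.1.0/24']))   # True, on the whitelist
print(check_ip_address(u'8.8.8.8', [], [u'192.168.1.0/24']))       # False, not on the whitelist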
94d8adf9d48c6118a3467947ad8b1ae0b6dd3d63
|
blog/migrations/0006_auto_20160513_1634.py
|
blog/migrations/0006_auto_20160513_1634.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-13 13:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20160422_1256'),
]
operations = [
migrations.AddField(
model_name='post',
name='content',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='post',
name='data_published',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='post',
name='description',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='post',
name='h1',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='post',
name='is_active',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='post',
name='keywords',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='post',
name='title',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='post',
name='type',
field=models.CharField(choices=[('article', 'Articles'), ('navigation', 'Navigation'), ('news', 'Daily news')], default='article', max_length=100),
),
]
|
Fix - add missed migrations
|
Fix - add missed migrations
|
Python
|
mit
|
fidals/refarm-site,fidals/refarm-site,fidals/refarm-site
|
Fix - add missed migrations
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-13 13:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20160422_1256'),
]
operations = [
migrations.AddField(
model_name='post',
name='content',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='post',
name='data_published',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='post',
name='description',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='post',
name='h1',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='post',
name='is_active',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='post',
name='keywords',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='post',
name='title',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='post',
name='type',
field=models.CharField(choices=[('article', 'Articles'), ('navigation', 'Navigation'), ('news', 'Daily news')], default='article', max_length=100),
),
]
|
<commit_before><commit_msg>Fix - add missed migrations<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-13 13:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20160422_1256'),
]
operations = [
migrations.AddField(
model_name='post',
name='content',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='post',
name='data_published',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='post',
name='description',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='post',
name='h1',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='post',
name='is_active',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='post',
name='keywords',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='post',
name='title',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='post',
name='type',
field=models.CharField(choices=[('article', 'Articles'), ('navigation', 'Navigation'), ('news', 'Daily news')], default='article', max_length=100),
),
]
|
Fix - add missed migrations# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-13 13:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20160422_1256'),
]
operations = [
migrations.AddField(
model_name='post',
name='content',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='post',
name='data_published',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='post',
name='description',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='post',
name='h1',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='post',
name='is_active',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='post',
name='keywords',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='post',
name='title',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='post',
name='type',
field=models.CharField(choices=[('article', 'Articles'), ('navigation', 'Navigation'), ('news', 'Daily news')], default='article', max_length=100),
),
]
|
<commit_before><commit_msg>Fix - add missed migrations<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-13 13:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20160422_1256'),
]
operations = [
migrations.AddField(
model_name='post',
name='content',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='post',
name='data_published',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AddField(
model_name='post',
name='description',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='post',
name='h1',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='post',
name='is_active',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='post',
name='keywords',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='post',
name='title',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='post',
name='type',
field=models.CharField(choices=[('article', 'Articles'), ('navigation', 'Navigation'), ('news', 'Daily news')], default='article', max_length=100),
),
]
|
|
4681ee081f5600cebf7540862efc60dbf1d190d7
|
test_app.py
|
test_app.py
|
import unittest
from unittest import TestCase
from user import User
from bucketlist import BucketList
from flask import url_for
from app import app
class BucketListTest(TestCase):
def setUp(self):
# creates a test client
self.client = app.test_client()
self.client.testing = True
def test_success(self):
# sends HTTP GET request to the application
# on the specified path
result = self.client.get('/login')
self.assertEqual(result.status_code, 200)
def test_failure(self):
# sends HTTP GET request to the application
# on the specified path
result = self.client.get('/nonexistant.html')
self.assertEqual(result.status_code, 404)
def test_login_page_loads(self):
# assert login page loads correctly
result = self.client.get('/login')
self.assertTrue(b'The best way to keep track of your dreams and goals' in result.data)
'''
def test_signup(self):
# register a new account
response = self.client.post(url_for('/signup'), data={
'username': 'hermano',
'email': 'herm@email.com',
'password': 'hard',
'confirm_password': 'hard'
})
self.assertTrue(response.status_code == 302)
'''
if __name__ == '__main__':
unittest.main()
|
Rename test module and add test for login page content
|
Rename test module and add test for login page content
|
Python
|
mit
|
mkiterian/bucket-list-app,mkiterian/bucket-list-app,mkiterian/bucket-list-app
|
Rename test module and add test for login page content
|
import unittest
from unittest import TestCase
from user import User
from bucketlist import BucketList
from flask import url_for
from app import app
class BucketListTest(TestCase):
def setUp(self):
# creates a test client
self.client = app.test_client()
self.client.testing = True
def test_success(self):
# sends HTTP GET request to the application
# on the specified path
result = self.client.get('/login')
self.assertEqual(result.status_code, 200)
def test_failure(self):
# sends HTTP GET request to the application
# on the specified path
result = self.client.get('/nonexistant.html')
self.assertEqual(result.status_code, 404)
def test_login_page_loads(self):
# assert login page loads correctly
result = self.client.get('/login')
self.assertTrue(b'The best way to keep track of your dreams and goals' in result.data)
'''
def test_signup(self):
# register a new account
response = self.client.post(url_for('/signup'), data={
'username': 'hermano',
'email': 'herm@email.com',
'password': 'hard',
'confirm_password': 'hard'
})
self.assertTrue(response.status_code == 302)
'''
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Rename test module and add test for login page content<commit_after>
|
import unittest
from unittest import TestCase
from user import User
from bucketlist import BucketList
from flask import url_for
from app import app
class BucketListTest(TestCase):
def setUp(self):
# creates a test client
self.client = app.test_client()
self.client.testing = True
def test_success(self):
# sends HTTP GET request to the application
# on the specified path
result = self.client.get('/login')
self.assertEqual(result.status_code, 200)
def test_failure(self):
# sends HTTP GET request to the application
# on the specified path
result = self.client.get('/nonexistant.html')
self.assertEqual(result.status_code, 404)
def test_login_page_loads(self):
# assert login page loads correctly
result = self.client.get('/login')
self.assertTrue(b'The best way to keep track of your dreams and goals' in result.data)
'''
def test_signup(self):
# register a new account
response = self.client.post(url_for('/signup'), data={
'username': 'hermano',
'email': 'herm@email.com',
'password': 'hard',
'confirm_password': 'hard'
})
self.assertTrue(response.status_code == 302)
'''
if __name__ == '__main__':
unittest.main()
|
Rename test module and add test for login page contentimport unittest
from unittest import TestCase
from user import User
from bucketlist import BucketList
from flask import url_for
from app import app
class BucketListTest(TestCase):
def setUp(self):
# creates a test client
self.client = app.test_client()
self.client.testing = True
def test_success(self):
# sends HTTP GET request to the application
# on the specified path
result = self.client.get('/login')
self.assertEqual(result.status_code, 200)
def test_failure(self):
# sends HTTP GET request to the application
# on the specified path
result = self.client.get('/nonexistant.html')
self.assertEqual(result.status_code, 404)
def test_login_page_loads(self):
# assert login page loads correctly
result = self.client.get('/login')
self.assertTrue(b'The best way to keep track of your dreams and goals' in result.data)
'''
def test_signup(self):
# register a new account
response = self.client.post(url_for('/signup'), data={
'username': 'hermano',
'email': 'herm@email.com',
'password': 'hard',
'confirm_password': 'hard'
})
self.assertTrue(response.status_code == 302)
'''
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Rename test module and add test for login page content<commit_after>import unittest
from unittest import TestCase
from user import User
from bucketlist import BucketList
from flask import url_for
from app import app
class BucketListTest(TestCase):
def setUp(self):
# creates a test client
self.client = app.test_client()
self.client.testing = True
def test_success(self):
# sends HTTP GET request to the application
# on the specified path
result = self.client.get('/login')
self.assertEqual(result.status_code, 200)
def test_failure(self):
# sends HTTP GET request to the application
# on the specified path
result = self.client.get('/nonexistant.html')
self.assertEqual(result.status_code, 404)
def test_login_page_loads(self):
# assert login page loads correctly
result = self.client.get('/login')
self.assertTrue(b'The best way to keep track of your dreams and goals' in result.data)
'''
def test_signup(self):
# register a new account
response = self.client.post(url_for('/signup'), data={
'username': 'hermano',
'email': 'herm@email.com',
'password': 'hard',
'confirm_password': 'hard'
})
self.assertTrue(response.status_code == 302)
'''
if __name__ == '__main__':
unittest.main()
|
|
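The commented-out signup test would fail even if enabled: Flask's url_for expects an endpoint name rather than a URL path, and it needs an application or request context to build the URL. With the test client it is simpler to post to the path directly. A sketch of how that test could look, assuming the app really exposes a /signup route that redirects on success (both assumptions carried over from the commented-out code):

def test_signup(self):
    # Register a new account by posting straight to the route path
    response = self.client.post('/signup', data={
        'username': 'hermano',
        'email': 'herm@email.com',
        'password': 'hard',
        'confirm_password': 'hard'
    })
    self.assertEqual(response.status_code, 302)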
f6c64846fc066403d39d7cb60ce0bcc455aff2d5
|
src/server/convert.py
|
src/server/convert.py
|
# midi-beeper-orchestra - program to create an orchestra from PC speakers
# Copyright (C) 2015 The Underscores
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
def MIDItoHz(MIDIval):
"""
Converts a MIDI note, MIDIval, value to the equivalent hertz value
"""
return 2**((MIDIval - 69) / 12.0) * 440
def hzToMIDI(hz):
"""
Converts hertz, hz, to MIDI note equivalent
"""
midi = 69 + 12 * math.log(hz / 440.0, 2)
return int(midi + 0.5)
|
Add conversions for MIDI to hertz and hertz to MIDI
|
Add conversions for MIDI to hertz and hertz to MIDI
|
Python
|
agpl-3.0
|
TheUnderscores/midi-beeper-orchestra
|
Add conversions for MIDI to hertz and hertz to MIDI
|
# midi-beeper-orchestra - program to create an orchestra from PC speakers
# Copyright (C) 2015 The Underscores
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
def MIDItoHz(MIDIval):
"""
Converts a MIDI note, MIDIval, value to the equivalent hertz value
"""
return 2**((MIDIval - 69) / 12.0) * 440
def hzToMIDI(hz):
"""
Converts hertz, hz, to MIDI note equivalent
"""
midi = 69 + 12 * math.log(hz / 440.0, 2)
return int(midi + 0.5)
|
<commit_before><commit_msg>Add conversions for MIDI to hertz and hertz to MIDI<commit_after>
|
# midi-beeper-orchestra - program to create an orchestra from PC speakers
# Copyright (C) 2015 The Underscores
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
def MIDItoHz(MIDIval):
"""
Converts a MIDI note, MIDIval, value to the equivalent hertz value
"""
return 2**((MIDIval - 69) / 12.0) * 440
def hzToMIDI(hz):
"""
Converts hertz, hz, to MIDI note equivalent
"""
midi = 69 + 12 * math.log(hz / 440.0, 2)
return int(midi + 0.5)
|
Add conversions for MIDI to hertz and hertz to MIDI# midi-beeper-orchestra - program to create an orchestra from PC speakers
# Copyright (C) 2015 The Underscores
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
def MIDItoHz(MIDIval):
"""
Converts a MIDI note, MIDIval, value to the equivalent hertz value
"""
return 2**((MIDIval - 69) / 12.0) * 440
def hzToMIDI(hz):
"""
Converts hertz, hz, to MIDI note equivalent
"""
midi = 69 + 12 * math.log(hz / 440.0, 2)
return int(midi + 0.5)
|
<commit_before><commit_msg>Add conversions for MIDI to hertz and hertz to MIDI<commit_after># midi-beeper-orchestra - program to create an orchestra from PC speakers
# Copyright (C) 2015 The Underscores
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
def MIDItoHz(MIDIval):
"""
Converts a MIDI note, MIDIval, value to the equivalent hertz value
"""
return 2**((MIDIval - 69) / 12.0) * 440
def hzToMIDI(hz):
"""
Converts hertz, hz, to MIDI note equivalent
"""
midi = 69 + 12 * math.log(hz / 440.0, 2)
return int(midi + 0.5)
|
|
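A quick way to validate the two conversions is a round trip through A4, which is MIDI note 69 at 440 Hz. A small sanity check, assuming convert.py is importable from src/server:

from convert import MIDItoHz, hzToMIDI

assert abs(MIDItoHz(69) - 440.0) < 1e-9     # A4
assert hzToMIDI(440.0) == 69
assert hzToMIDI(MIDItoHz(60)) == 60         # exact after rounding to the nearest note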
17cc4de3c0d6b7e3c843085a1f7f6694930a7e84
|
geometry/graham_scan/python/graham_scan.py
|
geometry/graham_scan/python/graham_scan.py
|
#!/usr/bin/env python
import Tkinter as tk
from random import random
def make_a_right_turn(a, b, c):
"""Going from a to b to c involves a right turn?"""
u = (c[0] - b[0], c[1] - b[1])
v = (a[0] - b[0], a[1] - b[1])
cross_product = u[0] * v[1] - u[1] * v[0]
return cross_product < 0
def graham_scan(points):
"""Calculate convex hull using Graham's Scan"""
points.sort()
upper_convex_hull = points[0:2]
for point in points[2:]:
while len(upper_convex_hull) > 1 and not make_a_right_turn(upper_convex_hull[-2], upper_convex_hull[-1], point):
upper_convex_hull.pop()
upper_convex_hull.append(point)
lower_convex_hull = points[-1:-3:-1]
for point in points[-3::-1]:
while len(lower_convex_hull) > 1 and not make_a_right_turn(lower_convex_hull[-2], lower_convex_hull[-1], point):
lower_convex_hull.pop()
lower_convex_hull.append(point)
return upper_convex_hull + lower_convex_hull[1:-1]
def test(n, width, height):
"""Test Graham's Scan algorithm with random points and draw the result"""
points = [(random() * width, random() * height) for _ in range(n)]
convex_hull = graham_scan(points)
# UI stuff
master = tk.Tk()
canvas = tk.Canvas(master, width=width, height=height)
canvas.pack()
canvas.create_polygon(*[coord for point in convex_hull for coord in point], outline='blue', width=2, fill='')
for (x, y) in points:
canvas.create_oval(x-1, y-1, x+1, y+1, fill='black')
tk.mainloop()
if __name__ == "__main__":
test(50, 640, 480)
|
Add Graham's Scan in Python
|
Add Graham's Scan in Python
|
Python
|
cc0-1.0
|
Deepak345/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms
|
Add Graham's Scan in Python
|
#!/usr/bin/env python
import Tkinter as tk
from random import random
def make_a_right_turn(a, b, c):
"""Going from a to b to c involves a right turn?"""
u = (c[0] - b[0], c[1] - b[1])
v = (a[0] - b[0], a[1] - b[1])
cross_product = u[0] * v[1] - u[1] * v[0]
return cross_product < 0
def graham_scan(points):
"""Calculate convex hull using Graham's Scan"""
points.sort()
upper_convex_hull = points[0:2]
for point in points[2:]:
while len(upper_convex_hull) > 1 and not make_a_right_turn(upper_convex_hull[-2], upper_convex_hull[-1], point):
upper_convex_hull.pop()
upper_convex_hull.append(point)
lower_convex_hull = points[-1:-3:-1]
for point in points[-3::-1]:
while len(lower_convex_hull) > 1 and not make_a_right_turn(lower_convex_hull[-2], lower_convex_hull[-1], point):
lower_convex_hull.pop()
lower_convex_hull.append(point)
return upper_convex_hull + lower_convex_hull[1:-1]
def test(n, width, height):
"""Test Graham's Scan algorithm with random points and draw the result"""
points = [(random() * width, random() * height) for _ in range(n)]
convex_hull = graham_scan(points)
# UI stuff
master = tk.Tk()
canvas = tk.Canvas(master, width=width, height=height)
canvas.pack()
canvas.create_polygon(*[coord for point in convex_hull for coord in point], outline='blue', width=2, fill='')
for (x, y) in points:
canvas.create_oval(x-1, y-1, x+1, y+1, fill='black')
tk.mainloop()
if __name__ == "__main__":
test(50, 640, 480)
|
<commit_before><commit_msg>Add Graham's Scan in Python<commit_after>
|
#!/usr/bin/env python
import Tkinter as tk
from random import random
def make_a_right_turn(a, b, c):
"""Going from a to b to c involves a right turn?"""
u = (c[0] - b[0], c[1] - b[1])
v = (a[0] - b[0], a[1] - b[1])
cross_product = u[0] * v[1] - u[1] * v[0]
return cross_product < 0
def graham_scan(points):
"""Calculate convex hull using Graham's Scan"""
points.sort()
upper_convex_hull = points[0:2]
for point in points[2:]:
while len(upper_convex_hull) > 1 and not make_a_right_turn(upper_convex_hull[-2], upper_convex_hull[-1], point):
upper_convex_hull.pop()
upper_convex_hull.append(point)
lower_convex_hull = points[-1:-3:-1]
for point in points[-3::-1]:
while len(lower_convex_hull) > 1 and not make_a_right_turn(lower_convex_hull[-2], lower_convex_hull[-1], point):
lower_convex_hull.pop()
lower_convex_hull.append(point)
return upper_convex_hull + lower_convex_hull[1:-1]
def test(n, width, height):
"""Test Graham's Scan algorithm with random points and draw the result"""
points = [(random() * width, random() * height) for _ in range(n)]
convex_hull = graham_scan(points)
# UI stuff
master = tk.Tk()
canvas = tk.Canvas(master, width=width, height=height)
canvas.pack()
canvas.create_polygon(*[coord for point in convex_hull for coord in point], outline='blue', width=2, fill='')
for (x, y) in points:
canvas.create_oval(x-1, y-1, x+1, y+1, fill='black')
tk.mainloop()
if __name__ == "__main__":
test(50, 640, 480)
|
Add Graham's Scan in Python#!/usr/bin/env python
import Tkinter as tk
from random import random
def make_a_right_turn(a, b, c):
"""Going from a to b to c involves a right turn?"""
u = (c[0] - b[0], c[1] - b[1])
v = (a[0] - b[0], a[1] - b[1])
cross_product = u[0] * v[1] - u[1] * v[0]
return cross_product < 0
def graham_scan(points):
"""Calculate convex hull using Graham's Scan"""
points.sort()
upper_convex_hull = points[0:2]
for point in points[2:]:
while len(upper_convex_hull) > 1 and not make_a_right_turn(upper_convex_hull[-2], upper_convex_hull[-1], point):
upper_convex_hull.pop()
upper_convex_hull.append(point)
lower_convex_hull = points[-1:-3:-1]
for point in points[-3::-1]:
while len(lower_convex_hull) > 1 and not make_a_right_turn(lower_convex_hull[-2], lower_convex_hull[-1], point):
lower_convex_hull.pop()
lower_convex_hull.append(point)
return upper_convex_hull + lower_convex_hull[1:-1]
def test(n, width, height):
"""Test Graham's Scan algorithm with random points and draw the result"""
points = [(random() * width, random() * height) for _ in range(n)]
convex_hull = graham_scan(points)
# UI stuff
master = tk.Tk()
canvas = tk.Canvas(master, width=width, height=height)
canvas.pack()
canvas.create_polygon(*[coord for point in convex_hull for coord in point], outline='blue', width=2, fill='')
for (x, y) in points:
canvas.create_oval(x-1, y-1, x+1, y+1, fill='black')
tk.mainloop()
if __name__ == "__main__":
test(50, 640, 480)
|
<commit_before><commit_msg>Add Graham's Scan in Python<commit_after>#!/usr/bin/env python
import Tkinter as tk
from random import random
def make_a_right_turn(a, b, c):
"""Going from a to b to c involves a right turn?"""
u = (c[0] - b[0], c[1] - b[1])
v = (a[0] - b[0], a[1] - b[1])
cross_product = u[0] * v[1] - u[1] * v[0]
return cross_product < 0
def graham_scan(points):
"""Calculate convex hull using Graham's Scan"""
points.sort()
upper_convex_hull = points[0:2]
for point in points[2:]:
while len(upper_convex_hull) > 1 and not make_a_right_turn(upper_convex_hull[-2], upper_convex_hull[-1], point):
upper_convex_hull.pop()
upper_convex_hull.append(point)
lower_convex_hull = points[-1:-3:-1]
for point in points[-3::-1]:
while len(lower_convex_hull) > 1 and not make_a_right_turn(lower_convex_hull[-2], lower_convex_hull[-1], point):
lower_convex_hull.pop()
lower_convex_hull.append(point)
return upper_convex_hull + lower_convex_hull[1:-1]
def test(n, width, height):
"""Test Graham's Scan algorithm with random points and draw the result"""
points = [(random() * width, random() * height) for _ in range(n)]
convex_hull = graham_scan(points)
# UI stuff
master = tk.Tk()
canvas = tk.Canvas(master, width=width, height=height)
canvas.pack()
canvas.create_polygon(*[coord for point in convex_hull for coord in point], outline='blue', width=2, fill='')
for (x, y) in points:
canvas.create_oval(x-1, y-1, x+1, y+1, fill='black')
tk.mainloop()
if __name__ == "__main__":
test(50, 640, 480)
|
|
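Because the hull construction is separate from the Tkinter drawing, graham_scan can be checked on a hand-picked point set without opening a window. A minimal check, assuming graham_scan.py is importable (the script itself targets Python 2 via its Tkinter import):

from graham_scan import graham_scan

points = [(0, 0), (0, 2), (1, 1), (2, 0), (2, 2)]
hull = graham_scan(list(points))            # pass a copy, since graham_scan sorts in place
assert (1, 1) not in hull                   # the interior point is discarded
assert sorted(hull) == [(0, 0), (0, 2), (2, 0), (2, 2)]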
1d311f7e53ac1081d801e902d8cb1d9a0ad8d1ec
|
tests/compiler/test_loop_compilation.py
|
tests/compiler/test_loop_compilation.py
|
from tests.compiler import compile_local, LST_ID, IMPLICIT_ITERATOR_ID, IMPLICIT_ITERATION_ID
from thinglang.compiler.opcodes import OpcodePushLocal, OpcodeCallInternal, OpcodePopLocal, OpcodeJumpConditional, \
OpcodeJump
from thinglang.foundation.definitions import INTERNAL_TYPE_ORDERING
from thinglang.lexer.values.identifier import Identifier
def test_access_in_method_args():
print(compile_local('for number a in lst'))
assert compile_local('for number x in lst') == [
OpcodePushLocal(LST_ID),
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier("list")], 4), # Create iterator
OpcodePopLocal(IMPLICIT_ITERATOR_ID), # Insert it into the frame
OpcodePushLocal(IMPLICIT_ITERATOR_ID), # TODO: is this optimal?
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier('iterator')], 1), # Call has_next
OpcodeJumpConditional(23), # Jump outside if not
OpcodePushLocal(IMPLICIT_ITERATOR_ID),
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier('iterator')], 2), # Call next
OpcodePopLocal(IMPLICIT_ITERATION_ID), # Insert into frame
OpcodeJump(16)
]
|
Add test for iteration loop bytecode generation
|
Add test for iteration loop bytecode generation
|
Python
|
mit
|
ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang
|
Add test for iteration loop bytecode generation
|
from tests.compiler import compile_local, LST_ID, IMPLICIT_ITERATOR_ID, IMPLICIT_ITERATION_ID
from thinglang.compiler.opcodes import OpcodePushLocal, OpcodeCallInternal, OpcodePopLocal, OpcodeJumpConditional, \
OpcodeJump
from thinglang.foundation.definitions import INTERNAL_TYPE_ORDERING
from thinglang.lexer.values.identifier import Identifier
def test_access_in_method_args():
print(compile_local('for number a in lst'))
assert compile_local('for number x in lst') == [
OpcodePushLocal(LST_ID),
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier("list")], 4), # Create iterator
OpcodePopLocal(IMPLICIT_ITERATOR_ID), # Insert it into the frame
OpcodePushLocal(IMPLICIT_ITERATOR_ID), # TODO: is this optimal?
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier('iterator')], 1), # Call has_next
OpcodeJumpConditional(23), # Jump outside if not
OpcodePushLocal(IMPLICIT_ITERATOR_ID),
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier('iterator')], 2), # Call next
OpcodePopLocal(IMPLICIT_ITERATION_ID), # Insert into frame
OpcodeJump(16)
]
|
<commit_before><commit_msg>Add test for iteration loop bytecode generation<commit_after>
|
from tests.compiler import compile_local, LST_ID, IMPLICIT_ITERATOR_ID, IMPLICIT_ITERATION_ID
from thinglang.compiler.opcodes import OpcodePushLocal, OpcodeCallInternal, OpcodePopLocal, OpcodeJumpConditional, \
OpcodeJump
from thinglang.foundation.definitions import INTERNAL_TYPE_ORDERING
from thinglang.lexer.values.identifier import Identifier
def test_access_in_method_args():
print(compile_local('for number a in lst'))
assert compile_local('for number x in lst') == [
OpcodePushLocal(LST_ID),
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier("list")], 4), # Create iterator
OpcodePopLocal(IMPLICIT_ITERATOR_ID), # Insert it into the frame
OpcodePushLocal(IMPLICIT_ITERATOR_ID), # TODO: is this optimal?
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier('iterator')], 1), # Call has_next
OpcodeJumpConditional(23), # Jump outside if not
OpcodePushLocal(IMPLICIT_ITERATOR_ID),
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier('iterator')], 2), # Call next
OpcodePopLocal(IMPLICIT_ITERATION_ID), # Insert into frame
OpcodeJump(16)
]
|
Add test for iteration loop bytecode generationfrom tests.compiler import compile_local, LST_ID, IMPLICIT_ITERATOR_ID, IMPLICIT_ITERATION_ID
from thinglang.compiler.opcodes import OpcodePushLocal, OpcodeCallInternal, OpcodePopLocal, OpcodeJumpConditional, \
OpcodeJump
from thinglang.foundation.definitions import INTERNAL_TYPE_ORDERING
from thinglang.lexer.values.identifier import Identifier
def test_access_in_method_args():
print(compile_local('for number a in lst'))
assert compile_local('for number x in lst') == [
OpcodePushLocal(LST_ID),
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier("list")], 4), # Create iterator
OpcodePopLocal(IMPLICIT_ITERATOR_ID), # Insert it into the frame
OpcodePushLocal(IMPLICIT_ITERATOR_ID), # TODO: is this optimal?
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier('iterator')], 1), # Call has_next
OpcodeJumpConditional(23), # Jump outside if not
OpcodePushLocal(IMPLICIT_ITERATOR_ID),
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier('iterator')], 2), # Call next
OpcodePopLocal(IMPLICIT_ITERATION_ID), # Insert into frame
OpcodeJump(16)
]
|
<commit_before><commit_msg>Add test for iteration loop bytecode generation<commit_after>from tests.compiler import compile_local, LST_ID, IMPLICIT_ITERATOR_ID, IMPLICIT_ITERATION_ID
from thinglang.compiler.opcodes import OpcodePushLocal, OpcodeCallInternal, OpcodePopLocal, OpcodeJumpConditional, \
OpcodeJump
from thinglang.foundation.definitions import INTERNAL_TYPE_ORDERING
from thinglang.lexer.values.identifier import Identifier
def test_access_in_method_args():
print(compile_local('for number a in lst'))
assert compile_local('for number x in lst') == [
OpcodePushLocal(LST_ID),
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier("list")], 4), # Create iterator
OpcodePopLocal(IMPLICIT_ITERATOR_ID), # Insert it into the frame
OpcodePushLocal(IMPLICIT_ITERATOR_ID), # TODO: is this optimal?
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier('iterator')], 1), # Call has_next
OpcodeJumpConditional(23), # Jump outside if not
OpcodePushLocal(IMPLICIT_ITERATOR_ID),
OpcodeCallInternal(INTERNAL_TYPE_ORDERING[Identifier('iterator')], 2), # Call next
OpcodePopLocal(IMPLICIT_ITERATION_ID), # Insert into frame
OpcodeJump(16)
]
|
|
7c609188df1ef457440543beb9dc4dbf286abd87
|
test/test_cache_source.py
|
test/test_cache_source.py
|
import pytest
import large_image
from large_image.cache_util import cachesClear
from .datastore import datastore
@pytest.mark.singular
def testCacheSourceStyle():
cachesClear()
imagePath = datastore.fetch('sample_image.ptif')
ts1 = large_image.open(imagePath)
ts2 = large_image.open(imagePath, style={'max': 128})
ts3 = large_image.open(imagePath, style={'max': 160})
tile1 = ts1.getTile(0, 0, 4)
assert ts1.getTile(0, 0, 4) is not None
assert ts2.getTile(0, 0, 4) is not None
assert ts3.getTile(0, 0, 4) is not None
cachesClear()
assert ts1.getTile(0, 0, 4) == tile1
del ts1
assert ts2.getTile(1, 0, 4) is not None
cachesClear()
assert ts2.getTile(2, 0, 4) is not None
ts1 = large_image.open(imagePath)
assert ts1.getTile(0, 0, 4) == tile1
@pytest.mark.singular
def testCacheSourceStyleFirst():
cachesClear()
imagePath = datastore.fetch('sample_image.ptif')
ts2 = large_image.open(imagePath, style={'max': 128})
ts1 = large_image.open(imagePath)
tile1 = ts1.getTile(0, 0, 4)
assert ts1.getTile(0, 0, 4) is not None
assert ts2.getTile(0, 0, 4) is not None
del ts1
assert ts2.getTile(1, 0, 4) is not None
cachesClear()
assert ts2.getTile(2, 0, 4) is not None
ts1 = large_image.open(imagePath)
assert ts1.getTile(0, 0, 4) == tile1
|
Add some source cache tests.
|
Add some source cache tests.
|
Python
|
apache-2.0
|
girder/large_image,girder/large_image,girder/large_image
|
Add some source cache tests.
|
import pytest
import large_image
from large_image.cache_util import cachesClear
from .datastore import datastore
@pytest.mark.singular
def testCacheSourceStyle():
cachesClear()
imagePath = datastore.fetch('sample_image.ptif')
ts1 = large_image.open(imagePath)
ts2 = large_image.open(imagePath, style={'max': 128})
ts3 = large_image.open(imagePath, style={'max': 160})
tile1 = ts1.getTile(0, 0, 4)
assert ts1.getTile(0, 0, 4) is not None
assert ts2.getTile(0, 0, 4) is not None
assert ts3.getTile(0, 0, 4) is not None
cachesClear()
assert ts1.getTile(0, 0, 4) == tile1
del ts1
assert ts2.getTile(1, 0, 4) is not None
cachesClear()
assert ts2.getTile(2, 0, 4) is not None
ts1 = large_image.open(imagePath)
assert ts1.getTile(0, 0, 4) == tile1
@pytest.mark.singular
def testCacheSourceStyleFirst():
cachesClear()
imagePath = datastore.fetch('sample_image.ptif')
ts2 = large_image.open(imagePath, style={'max': 128})
ts1 = large_image.open(imagePath)
tile1 = ts1.getTile(0, 0, 4)
assert ts1.getTile(0, 0, 4) is not None
assert ts2.getTile(0, 0, 4) is not None
del ts1
assert ts2.getTile(1, 0, 4) is not None
cachesClear()
assert ts2.getTile(2, 0, 4) is not None
ts1 = large_image.open(imagePath)
assert ts1.getTile(0, 0, 4) == tile1
|
<commit_before><commit_msg>Add some source cache tests.<commit_after>
|
import pytest
import large_image
from large_image.cache_util import cachesClear
from .datastore import datastore
@pytest.mark.singular
def testCacheSourceStyle():
cachesClear()
imagePath = datastore.fetch('sample_image.ptif')
ts1 = large_image.open(imagePath)
ts2 = large_image.open(imagePath, style={'max': 128})
ts3 = large_image.open(imagePath, style={'max': 160})
tile1 = ts1.getTile(0, 0, 4)
assert ts1.getTile(0, 0, 4) is not None
assert ts2.getTile(0, 0, 4) is not None
assert ts3.getTile(0, 0, 4) is not None
cachesClear()
assert ts1.getTile(0, 0, 4) == tile1
del ts1
assert ts2.getTile(1, 0, 4) is not None
cachesClear()
assert ts2.getTile(2, 0, 4) is not None
ts1 = large_image.open(imagePath)
assert ts1.getTile(0, 0, 4) == tile1
@pytest.mark.singular
def testCacheSourceStyleFirst():
cachesClear()
imagePath = datastore.fetch('sample_image.ptif')
ts2 = large_image.open(imagePath, style={'max': 128})
ts1 = large_image.open(imagePath)
tile1 = ts1.getTile(0, 0, 4)
assert ts1.getTile(0, 0, 4) is not None
assert ts2.getTile(0, 0, 4) is not None
del ts1
assert ts2.getTile(1, 0, 4) is not None
cachesClear()
assert ts2.getTile(2, 0, 4) is not None
ts1 = large_image.open(imagePath)
assert ts1.getTile(0, 0, 4) == tile1
|
Add some source cache tests.import pytest
import large_image
from large_image.cache_util import cachesClear
from .datastore import datastore
@pytest.mark.singular
def testCacheSourceStyle():
cachesClear()
imagePath = datastore.fetch('sample_image.ptif')
ts1 = large_image.open(imagePath)
ts2 = large_image.open(imagePath, style={'max': 128})
ts3 = large_image.open(imagePath, style={'max': 160})
tile1 = ts1.getTile(0, 0, 4)
assert ts1.getTile(0, 0, 4) is not None
assert ts2.getTile(0, 0, 4) is not None
assert ts3.getTile(0, 0, 4) is not None
cachesClear()
assert ts1.getTile(0, 0, 4) == tile1
del ts1
assert ts2.getTile(1, 0, 4) is not None
cachesClear()
assert ts2.getTile(2, 0, 4) is not None
ts1 = large_image.open(imagePath)
assert ts1.getTile(0, 0, 4) == tile1
@pytest.mark.singular
def testCacheSourceStyleFirst():
cachesClear()
imagePath = datastore.fetch('sample_image.ptif')
ts2 = large_image.open(imagePath, style={'max': 128})
ts1 = large_image.open(imagePath)
tile1 = ts1.getTile(0, 0, 4)
assert ts1.getTile(0, 0, 4) is not None
assert ts2.getTile(0, 0, 4) is not None
del ts1
assert ts2.getTile(1, 0, 4) is not None
cachesClear()
assert ts2.getTile(2, 0, 4) is not None
ts1 = large_image.open(imagePath)
assert ts1.getTile(0, 0, 4) == tile1
|
<commit_before><commit_msg>Add some source cache tests.<commit_after>import pytest
import large_image
from large_image.cache_util import cachesClear
from .datastore import datastore
@pytest.mark.singular
def testCacheSourceStyle():
cachesClear()
imagePath = datastore.fetch('sample_image.ptif')
ts1 = large_image.open(imagePath)
ts2 = large_image.open(imagePath, style={'max': 128})
ts3 = large_image.open(imagePath, style={'max': 160})
tile1 = ts1.getTile(0, 0, 4)
assert ts1.getTile(0, 0, 4) is not None
assert ts2.getTile(0, 0, 4) is not None
assert ts3.getTile(0, 0, 4) is not None
cachesClear()
assert ts1.getTile(0, 0, 4) == tile1
del ts1
assert ts2.getTile(1, 0, 4) is not None
cachesClear()
assert ts2.getTile(2, 0, 4) is not None
ts1 = large_image.open(imagePath)
assert ts1.getTile(0, 0, 4) == tile1
@pytest.mark.singular
def testCacheSourceStyleFirst():
cachesClear()
imagePath = datastore.fetch('sample_image.ptif')
ts2 = large_image.open(imagePath, style={'max': 128})
ts1 = large_image.open(imagePath)
tile1 = ts1.getTile(0, 0, 4)
assert ts1.getTile(0, 0, 4) is not None
assert ts2.getTile(0, 0, 4) is not None
del ts1
assert ts2.getTile(1, 0, 4) is not None
cachesClear()
assert ts2.getTile(2, 0, 4) is not None
ts1 = large_image.open(imagePath)
assert ts1.getTile(0, 0, 4) == tile1
|
|
5d58788f75a7334def3dc5a2471c9e0ed2893589
|
test/item_in_init_test.py
|
test/item_in_init_test.py
|
# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from multiconf import mc_config, ConfigItem
from multiconf.envs import EnvFactory
ef = EnvFactory()
prod = ef.Env('prod')
def test_item_in_init_goes_to_parent():
parent = [None]
class X(ConfigItem):
def __init__(self, aa=1):
super(X, self).__init__()
self.aa = aa
class Y(X):
def __init__(self, aa=37):
parent[0] = self.contained_in
bb = X() # X is created in parent and ref assigned to bb
super(Y, self).__init__(aa)
self.bb = bb
self.cc = None
@mc_config(ef)
def _(_):
with ConfigItem():
with ConfigItem():
Y()
it = ef.config(prod).ConfigItem.ConfigItem
assert it == parent[0]
assert it.X.aa == 1
assert it.Y.aa == 37
assert it.Y.bb == it.X
assert it.Y.cc is None
|
Test ConfigItem created in __init__ goes to parent
|
Test ConfigItem created in __init__ goes to parent
|
Python
|
bsd-3-clause
|
lhupfeldt/multiconf
|
Test ConfigItem created in __init__ goes to parent
|
# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from multiconf import mc_config, ConfigItem
from multiconf.envs import EnvFactory
ef = EnvFactory()
prod = ef.Env('prod')
def test_item_in_init_goes_to_parent():
parent = [None]
class X(ConfigItem):
def __init__(self, aa=1):
super(X, self).__init__()
self.aa = aa
class Y(X):
def __init__(self, aa=37):
parent[0] = self.contained_in
bb = X() # X is created in parent and ref assigned to bb
super(Y, self).__init__(aa)
self.bb = bb
self.cc = None
@mc_config(ef)
def _(_):
with ConfigItem():
with ConfigItem():
Y()
it = ef.config(prod).ConfigItem.ConfigItem
assert it == parent[0]
assert it.X.aa == 1
assert it.Y.aa == 37
assert it.Y.bb == it.X
assert it.Y.cc is None
|
<commit_before><commit_msg>Test ConfigItem created in __init__ goes to parent<commit_after>
|
# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from multiconf import mc_config, ConfigItem
from multiconf.envs import EnvFactory
ef = EnvFactory()
prod = ef.Env('prod')
def test_item_in_init_goes_to_parent():
parent = [None]
class X(ConfigItem):
def __init__(self, aa=1):
super(X, self).__init__()
self.aa = aa
class Y(X):
def __init__(self, aa=37):
parent[0] = self.contained_in
bb = X() # X is created in parent and ref assigned to bb
super(Y, self).__init__(aa)
self.bb = bb
self.cc = None
@mc_config(ef)
def _(_):
with ConfigItem():
with ConfigItem():
Y()
it = ef.config(prod).ConfigItem.ConfigItem
assert it == parent[0]
assert it.X.aa == 1
assert it.Y.aa == 37
assert it.Y.bb == it.X
assert it.Y.cc is None
|
Test ConfigItem created in __init__ goes to parent# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from multiconf import mc_config, ConfigItem
from multiconf.envs import EnvFactory
ef = EnvFactory()
prod = ef.Env('prod')
def test_item_in_init_goes_to_parent():
parent = [None]
class X(ConfigItem):
def __init__(self, aa=1):
super(X, self).__init__()
self.aa = aa
class Y(X):
def __init__(self, aa=37):
parent[0] = self.contained_in
bb = X() # X is created in parent and ref assigned to bb
super(Y, self).__init__(aa)
self.bb = bb
self.cc = None
@mc_config(ef)
def _(_):
with ConfigItem():
with ConfigItem():
Y()
it = ef.config(prod).ConfigItem.ConfigItem
assert it == parent[0]
assert it.X.aa == 1
assert it.Y.aa == 37
assert it.Y.bb == it.X
assert it.Y.cc is None
|
<commit_before><commit_msg>Test ConfigItem created in __init__ goes to parent<commit_after># Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from multiconf import mc_config, ConfigItem
from multiconf.envs import EnvFactory
ef = EnvFactory()
prod = ef.Env('prod')
def test_item_in_init_goes_to_parent():
parent = [None]
class X(ConfigItem):
def __init__(self, aa=1):
super(X, self).__init__()
self.aa = aa
class Y(X):
def __init__(self, aa=37):
parent[0] = self.contained_in
bb = X() # X is created in parent and ref assigned to bb
super(Y, self).__init__(aa)
self.bb = bb
self.cc = None
@mc_config(ef)
def _(_):
with ConfigItem():
with ConfigItem():
Y()
it = ef.config(prod).ConfigItem.ConfigItem
assert it == parent[0]
assert it.X.aa == 1
assert it.Y.aa == 37
assert it.Y.bb == it.X
assert it.Y.cc is None
|
|
8618c68046487d475c077cb30070c9080cc4fbc7
|
tests/test_WOA_from_nc.py
|
tests/test_WOA_from_nc.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from datetime import datetime
from WOA.woa import WOA
def test_import():
# A shortcut
from WOA import WOA
db = WOA()
def test_available_vars():
db = WOA()
for v in ['TEMP', 'PSAL']:
assert v in db.keys()
def test_get_profile():
db = WOA()
db['TEMP'].get_profile(var='mn', doy=datetime.now(),
depth=0, lat=10, lon=330)
db['TEMP'].get_profile(var='mn', doy=datetime.now(),
depth=[0,10], lat=10, lon=330)
db['TEMP'].get_profile(doy=datetime.now(),
depth=[0,10], lat=10, lon=330)
|
Test prototype for WOA from a netCDF file.
|
Test prototype for WOA from a netCDF file.
|
Python
|
bsd-3-clause
|
castelao/oceansdb,castelao/pyWOA
|
Test prototype for WOA from a netCDF file.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from datetime import datetime
from WOA.woa import WOA
def test_import():
# A shortcut
from WOA import WOA
db = WOA()
def test_available_vars():
db = WOA()
for v in ['TEMP', 'PSAL']:
assert v in db.keys()
def test_get_profile():
db = WOA()
db['TEMP'].get_profile(var='mn', doy=datetime.now(),
depth=0, lat=10, lon=330)
db['TEMP'].get_profile(var='mn', doy=datetime.now(),
depth=[0,10], lat=10, lon=330)
db['TEMP'].get_profile(doy=datetime.now(),
depth=[0,10], lat=10, lon=330)
|
<commit_before><commit_msg>Test prototype for WOA from a netCDF file.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from datetime import datetime
from WOA.woa import WOA
def test_import():
# A shortcut
from WOA import WOA
db = WOA()
def test_available_vars():
db = WOA()
for v in ['TEMP', 'PSAL']:
assert v in db.keys()
def test_get_profile():
db = WOA()
db['TEMP'].get_profile(var='mn', doy=datetime.now(),
depth=0, lat=10, lon=330)
db['TEMP'].get_profile(var='mn', doy=datetime.now(),
depth=[0,10], lat=10, lon=330)
db['TEMP'].get_profile(doy=datetime.now(),
depth=[0,10], lat=10, lon=330)
|
Test prototype for WOA from a netCDF file.#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from datetime import datetime
from WOA.woa import WOA
def test_import():
# A shortcut
from WOA import WOA
db = WOA()
def test_available_vars():
db = WOA()
for v in ['TEMP', 'PSAL']:
assert v in db.keys()
def test_get_profile():
db = WOA()
db['TEMP'].get_profile(var='mn', doy=datetime.now(),
depth=0, lat=10, lon=330)
db['TEMP'].get_profile(var='mn', doy=datetime.now(),
depth=[0,10], lat=10, lon=330)
db['TEMP'].get_profile(doy=datetime.now(),
depth=[0,10], lat=10, lon=330)
|
<commit_before><commit_msg>Test prototype for WOA from a netCDF file.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from datetime import datetime
from WOA.woa import WOA
def test_import():
# A shortcut
from WOA import WOA
db = WOA()
def test_available_vars():
db = WOA()
for v in ['TEMP', 'PSAL']:
assert v in db.keys()
def test_get_profile():
db = WOA()
db['TEMP'].get_profile(var='mn', doy=datetime.now(),
depth=0, lat=10, lon=330)
db['TEMP'].get_profile(var='mn', doy=datetime.now(),
depth=[0,10], lat=10, lon=330)
db['TEMP'].get_profile(doy=datetime.now(),
depth=[0,10], lat=10, lon=330)
|
|
7593923070766f53a35d3c404523199f68accd3e
|
tests/test_user_config.py
|
tests/test_user_config.py
|
# -*- coding: utf-8 -*-
def test_config(testdir):
"""Make sure that pytest accepts the `cookies` fixture."""
# create a temporary pytest test module
testdir.makepyfile("""
import json
def test_user_dir(tmpdir_factory, _config_file):
basetemp = tmpdir_factory.getbasetemp()
assert _config_file.basename == 'config'
user_dir = _config_file.dirpath()
assert user_dir.fnmatch('user_dir?')
assert user_dir.dirpath() == basetemp
def test_valid_cookiecutter_config(_config_file):
config_text = _config_file.read()
config = json.loads(config_text)
user_dir = _config_file.dirpath()
expected = {
'cookiecutters_dir': str(user_dir.join('cookiecutters')),
'replay_dir': str(user_dir.join('cookiecutter_replay')),
'default_context': {}
}
assert config == expected
""")
# run pytest with the following cmd args
result = testdir.runpytest('-vv')
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'*::test_user_dir PASSED',
'*::test_valid_cookiecutter_config PASSED',
])
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
|
Implement tests for a new _config_file fixture
|
Implement tests for a new _config_file fixture
|
Python
|
mit
|
hackebrot/pytest-cookies
|
Implement tests for a new _config_file fixture
|
# -*- coding: utf-8 -*-
def test_config(testdir):
"""Make sure that pytest accepts the `cookies` fixture."""
# create a temporary pytest test module
testdir.makepyfile("""
import json
def test_user_dir(tmpdir_factory, _config_file):
basetemp = tmpdir_factory.getbasetemp()
assert _config_file.basename == 'config'
user_dir = _config_file.dirpath()
assert user_dir.fnmatch('user_dir?')
assert user_dir.dirpath() == basetemp
def test_valid_cookiecutter_config(_config_file):
config_text = _config_file.read()
config = json.loads(config_text)
user_dir = _config_file.dirpath()
expected = {
'cookiecutters_dir': str(user_dir.join('cookiecutters')),
'replay_dir': str(user_dir.join('cookiecutter_replay')),
'default_context': {}
}
assert config == expected
""")
# run pytest with the following cmd args
result = testdir.runpytest('-vv')
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'*::test_user_dir PASSED',
'*::test_valid_cookiecutter_config PASSED',
])
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
|
<commit_before><commit_msg>Implement tests for a new _config_file fixture<commit_after>
|
# -*- coding: utf-8 -*-
def test_config(testdir):
"""Make sure that pytest accepts the `cookies` fixture."""
# create a temporary pytest test module
testdir.makepyfile("""
import json
def test_user_dir(tmpdir_factory, _config_file):
basetemp = tmpdir_factory.getbasetemp()
assert _config_file.basename == 'config'
user_dir = _config_file.dirpath()
assert user_dir.fnmatch('user_dir?')
assert user_dir.dirpath() == basetemp
def test_valid_cookiecutter_config(_config_file):
config_text = _config_file.read()
config = json.loads(config_text)
user_dir = _config_file.dirpath()
expected = {
'cookiecutters_dir': str(user_dir.join('cookiecutters')),
'replay_dir': str(user_dir.join('cookiecutter_replay')),
'default_context': {}
}
assert config == expected
""")
# run pytest with the following cmd args
result = testdir.runpytest('-vv')
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'*::test_user_dir PASSED',
'*::test_valid_cookiecutter_config PASSED',
])
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
|
Implement tests for a new _config_file fixture# -*- coding: utf-8 -*-
def test_config(testdir):
"""Make sure that pytest accepts the `cookies` fixture."""
# create a temporary pytest test module
testdir.makepyfile("""
import json
def test_user_dir(tmpdir_factory, _config_file):
basetemp = tmpdir_factory.getbasetemp()
assert _config_file.basename == 'config'
user_dir = _config_file.dirpath()
assert user_dir.fnmatch('user_dir?')
assert user_dir.dirpath() == basetemp
def test_valid_cookiecutter_config(_config_file):
config_text = _config_file.read()
config = json.loads(config_text)
user_dir = _config_file.dirpath()
expected = {
'cookiecutters_dir': str(user_dir.join('cookiecutters')),
'replay_dir': str(user_dir.join('cookiecutter_replay')),
'default_context': {}
}
assert config == expected
""")
# run pytest with the following cmd args
result = testdir.runpytest('-vv')
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'*::test_user_dir PASSED',
'*::test_valid_cookiecutter_config PASSED',
])
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
|
<commit_before><commit_msg>Implement tests for a new _config_file fixture<commit_after># -*- coding: utf-8 -*-
def test_config(testdir):
"""Make sure that pytest accepts the `cookies` fixture."""
# create a temporary pytest test module
testdir.makepyfile("""
import json
def test_user_dir(tmpdir_factory, _config_file):
basetemp = tmpdir_factory.getbasetemp()
assert _config_file.basename == 'config'
user_dir = _config_file.dirpath()
assert user_dir.fnmatch('user_dir?')
assert user_dir.dirpath() == basetemp
def test_valid_cookiecutter_config(_config_file):
config_text = _config_file.read()
config = json.loads(config_text)
user_dir = _config_file.dirpath()
expected = {
'cookiecutters_dir': str(user_dir.join('cookiecutters')),
'replay_dir': str(user_dir.join('cookiecutter_replay')),
'default_context': {}
}
assert config == expected
""")
# run pytest with the following cmd args
result = testdir.runpytest('-vv')
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'*::test_user_dir PASSED',
'*::test_valid_cookiecutter_config PASSED',
])
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
|
|
4973cf7fda38168c8189d77ced2ee2a2c89cadfa
|
py/can-place-flowers.py
|
py/can-place-flowers.py
|
from itertools import groupby
class Solution(object):
def canPlaceFlowers(self, flowerbed, n):
"""
:type flowerbed: List[int]
:type n: int
:rtype: bool
"""
prev = None
l = len(flowerbed)
for i, f in enumerate(flowerbed):
if f == 0:
if not prev and (i >= l - 1 or flowerbed[i + 1] == 0):
flowerbed[i] = 1
n -= 1
prev = flowerbed[i]
return n <= 0
|
Add py solution for 605. Can Place Flowers
|
Add py solution for 605. Can Place Flowers
605. Can Place Flowers: https://leetcode.com/problems/can-place-flowers/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 605. Can Place Flowers
605. Can Place Flowers: https://leetcode.com/problems/can-place-flowers/
|
from itertools import groupby
class Solution(object):
def canPlaceFlowers(self, flowerbed, n):
"""
:type flowerbed: List[int]
:type n: int
:rtype: bool
"""
prev = None
l = len(flowerbed)
for i, f in enumerate(flowerbed):
if f == 0:
if not prev and (i >= l - 1 or flowerbed[i + 1] == 0):
flowerbed[i] = 1
n -= 1
prev = flowerbed[i]
return n <= 0
|
<commit_before><commit_msg>Add py solution for 605. Can Place Flowers
605. Can Place Flowers: https://leetcode.com/problems/can-place-flowers/<commit_after>
|
from itertools import groupby
class Solution(object):
def canPlaceFlowers(self, flowerbed, n):
"""
:type flowerbed: List[int]
:type n: int
:rtype: bool
"""
prev = None
l = len(flowerbed)
for i, f in enumerate(flowerbed):
if f == 0:
if not prev and (i >= l - 1 or flowerbed[i + 1] == 0):
flowerbed[i] = 1
n -= 1
prev = flowerbed[i]
return n <= 0
|
Add py solution for 605. Can Place Flowers
605. Can Place Flowers: https://leetcode.com/problems/can-place-flowers/from itertools import groupby
class Solution(object):
def canPlaceFlowers(self, flowerbed, n):
"""
:type flowerbed: List[int]
:type n: int
:rtype: bool
"""
prev = None
l = len(flowerbed)
for i, f in enumerate(flowerbed):
if f == 0:
if not prev and (i >= l - 1 or flowerbed[i + 1] == 0):
flowerbed[i] = 1
n -= 1
prev = flowerbed[i]
return n <= 0
|
<commit_before><commit_msg>Add py solution for 605. Can Place Flowers
605. Can Place Flowers: https://leetcode.com/problems/can-place-flowers/<commit_after>from itertools import groupby
class Solution(object):
def canPlaceFlowers(self, flowerbed, n):
"""
:type flowerbed: List[int]
:type n: int
:rtype: bool
"""
prev = None
l = len(flowerbed)
for i, f in enumerate(flowerbed):
if f == 0:
if not prev and (i >= l - 1 or flowerbed[i + 1] == 0):
flowerbed[i] = 1
n -= 1
prev = flowerbed[i]
return n <= 0
|
|
5dc2ad1bf129ba2b4f77602678f8e62d26d132a9
|
utils/add_sample_feeds.py
|
utils/add_sample_feeds.py
|
from smoke_signal import app, init_db
from smoke_signal.database.helpers import add_feed
from utils.generate_feed import SampleFeed
from os import walk
feeds_dir = app.root_path + "/test_resources/feeds/"
app.config['DATABASE_PATH'] = 'sqlite:///smoke_signal/test_resources/posts.db'
def create_sample_feed_files(num_feeds, num_items):
for i in range(num_feeds):
feed = SampleFeed("Test feed {}".format(i))
for j in range(num_items):
feed.add_item()
filename = feeds_dir + "feed{}.xml".format(i)
with open(filename, "w+") as f:
f.write(feed.__str__())
def add_feeds_to_db():
filenames = next(walk(feeds_dir))[2]
with app.app_context():
init_db()
for filename in filenames:
add_feed("file://" + feeds_dir + filename)
|
Add new utility script to add sample feeds as files
|
Add new utility script to add sample feeds as files
|
Python
|
mit
|
flacerdk/smoke-signal,flacerdk/smoke-signal,flacerdk/smoke-signal
|
Add new utility script to add sample feeds as files
|
from smoke_signal import app, init_db
from smoke_signal.database.helpers import add_feed
from utils.generate_feed import SampleFeed
from os import walk
feeds_dir = app.root_path + "/test_resources/feeds/"
app.config['DATABASE_PATH'] = 'sqlite:///smoke_signal/test_resources/posts.db'
def create_sample_feed_files(num_feeds, num_items):
for i in range(num_feeds):
feed = SampleFeed("Test feed {}".format(i))
for j in range(num_items):
feed.add_item()
filename = feeds_dir + "feed{}.xml".format(i)
with open(filename, "w+") as f:
f.write(feed.__str__())
def add_feeds_to_db():
filenames = next(walk(feeds_dir))[2]
with app.app_context():
init_db()
for filename in filenames:
add_feed("file://" + feeds_dir + filename)
|
<commit_before><commit_msg>Add new utility script to add sample feeds as files<commit_after>
|
from smoke_signal import app, init_db
from smoke_signal.database.helpers import add_feed
from utils.generate_feed import SampleFeed
from os import walk
feeds_dir = app.root_path + "/test_resources/feeds/"
app.config['DATABASE_PATH'] = 'sqlite:///smoke_signal/test_resources/posts.db'
def create_sample_feed_files(num_feeds, num_items):
for i in range(num_feeds):
feed = SampleFeed("Test feed {}".format(i))
for j in range(num_items):
feed.add_item()
filename = feeds_dir + "feed{}.xml".format(i)
with open(filename, "w+") as f:
f.write(feed.__str__())
def add_feeds_to_db():
filenames = next(walk(feeds_dir))[2]
with app.app_context():
init_db()
for filename in filenames:
add_feed("file://" + feeds_dir + filename)
|
Add new utility script to add sample feeds as filesfrom smoke_signal import app, init_db
from smoke_signal.database.helpers import add_feed
from utils.generate_feed import SampleFeed
from os import walk
feeds_dir = app.root_path + "/test_resources/feeds/"
app.config['DATABASE_PATH'] = 'sqlite:///smoke_signal/test_resources/posts.db'
def create_sample_feed_files(num_feeds, num_items):
for i in range(num_feeds):
feed = SampleFeed("Test feed {}".format(i))
for j in range(num_items):
feed.add_item()
filename = feeds_dir + "feed{}.xml".format(i)
with open(filename, "w+") as f:
f.write(feed.__str__())
def add_feeds_to_db():
filenames = next(walk(feeds_dir))[2]
with app.app_context():
init_db()
for filename in filenames:
add_feed("file://" + feeds_dir + filename)
|
<commit_before><commit_msg>Add new utility script to add sample feeds as files<commit_after>from smoke_signal import app, init_db
from smoke_signal.database.helpers import add_feed
from utils.generate_feed import SampleFeed
from os import walk
feeds_dir = app.root_path + "/test_resources/feeds/"
app.config['DATABASE_PATH'] = 'sqlite:///smoke_signal/test_resources/posts.db'
def create_sample_feed_files(num_feeds, num_items):
for i in range(num_feeds):
feed = SampleFeed("Test feed {}".format(i))
for j in range(num_items):
feed.add_item()
filename = feeds_dir + "feed{}.xml".format(i)
with open(filename, "w+") as f:
f.write(feed.__str__())
def add_feeds_to_db():
filenames = next(walk(feeds_dir))[2]
with app.app_context():
init_db()
for filename in filenames:
add_feed("file://" + feeds_dir + filename)
|
|
399568bbb0c88b2aa3919ac3552483a9dd8f01ab
|
python/examples/instruction-iterator.py
|
python/examples/instruction-iterator.py
|
#!/usr/bin/env python
import sys
try:
import binaryninja
except ImportError:
sys.path.append("/Applications/Binary Ninja.app/Contents/Resources/python/")
import binaryninja
import time
if sys.platform.lower().startswith("linux"):
bintype="ELF"
elif sys.platform.lower() == "darwin":
bintype="Mach-O"
else:
raise Exception, "%s is not supported on this plugin" % sys.platform
if len(sys.argv) > 1:
target = sys.argv[1]
else:
target = "/bin/ls"
bv = binaryninja.BinaryViewType[bintype].open(target)
bv.update_analysis()
"""Until update_analysis_and_wait is complete, sleep is necessary as the analysis is multi-threaded."""
time.sleep(1)
print "-------- %s --------" % target
print "START: 0x%x" % bv.start
print "ENTRY: 0x%x" % bv.entry_point
print "ARCH: %s" % bv.arch.name
print "\n-------- Function List --------"
""" print all the functions, their basic blocks, and their il instructions """
for func in bv.functions:
print repr(func)
for block in func.low_level_il:
print "\t{0}".format(block)
for insn in block:
print "\t\t{0}".format(insn)
""" print all the functions, their basic blocks, and their mc instructions """
for func in bv.functions:
print repr(func)
for block in func:
print "\t{0}".format(block)
for insn in block:
print "\t\t{0}".format(insn)
|
Add an example that uses the iterators
|
Add an example that uses the iterators
|
Python
|
mit
|
Vector35/binaryninja-api,Vector35/binaryninja-api,joshwatson/binaryninja-api,Vector35/binaryninja-api,Vector35/binaryninja-api,joshwatson/binaryninja-api,joshwatson/binaryninja-api,joshwatson/binaryninja-api,Vector35/binaryninja-api,joshwatson/binaryninja-api,Vector35/binaryninja-api,Vector35/binaryninja-api
|
Add an example that uses the iterators
|
#!/usr/bin/env python
import sys
try:
import binaryninja
except ImportError:
sys.path.append("/Applications/Binary Ninja.app/Contents/Resources/python/")
import binaryninja
import time
if sys.platform.lower().startswith("linux"):
bintype="ELF"
elif sys.platform.lower() == "darwin":
bintype="Mach-O"
else:
raise Exception, "%s is not supported on this plugin" % sys.platform
if len(sys.argv) > 1:
target = sys.argv[1]
else:
target = "/bin/ls"
bv = binaryninja.BinaryViewType[bintype].open(target)
bv.update_analysis()
"""Until update_analysis_and_wait is complete, sleep is necessary as the analysis is multi-threaded."""
time.sleep(1)
print "-------- %s --------" % target
print "START: 0x%x" % bv.start
print "ENTRY: 0x%x" % bv.entry_point
print "ARCH: %s" % bv.arch.name
print "\n-------- Function List --------"
""" print all the functions, their basic blocks, and their il instructions """
for func in bv.functions:
print repr(func)
for block in func.low_level_il:
print "\t{0}".format(block)
for insn in block:
print "\t\t{0}".format(insn)
""" print all the functions, their basic blocks, and their mc instructions """
for func in bv.functions:
print repr(func)
for block in func:
print "\t{0}".format(block)
for insn in block:
print "\t\t{0}".format(insn)
|
<commit_before><commit_msg>Add an example that uses the iterators<commit_after>
|
#!/usr/bin/env python
import sys
try:
import binaryninja
except ImportError:
sys.path.append("/Applications/Binary Ninja.app/Contents/Resources/python/")
import binaryninja
import time
if sys.platform.lower().startswith("linux"):
bintype="ELF"
elif sys.platform.lower() == "darwin":
bintype="Mach-O"
else:
raise Exception, "%s is not supported on this plugin" % sys.platform
if len(sys.argv) > 1:
target = sys.argv[1]
else:
target = "/bin/ls"
bv = binaryninja.BinaryViewType[bintype].open(target)
bv.update_analysis()
"""Until update_analysis_and_wait is complete, sleep is necessary as the analysis is multi-threaded."""
time.sleep(1)
print "-------- %s --------" % target
print "START: 0x%x" % bv.start
print "ENTRY: 0x%x" % bv.entry_point
print "ARCH: %s" % bv.arch.name
print "\n-------- Function List --------"
""" print all the functions, their basic blocks, and their il instructions """
for func in bv.functions:
print repr(func)
for block in func.low_level_il:
print "\t{0}".format(block)
for insn in block:
print "\t\t{0}".format(insn)
""" print all the functions, their basic blocks, and their mc instructions """
for func in bv.functions:
print repr(func)
for block in func:
print "\t{0}".format(block)
for insn in block:
print "\t\t{0}".format(insn)
|
Add an example that uses the iterators#!/usr/bin/env python
import sys
try:
import binaryninja
except ImportError:
sys.path.append("/Applications/Binary Ninja.app/Contents/Resources/python/")
import binaryninja
import time
if sys.platform.lower().startswith("linux"):
bintype="ELF"
elif sys.platform.lower() == "darwin":
bintype="Mach-O"
else:
raise Exception, "%s is not supported on this plugin" % sys.platform
if len(sys.argv) > 1:
target = sys.argv[1]
else:
target = "/bin/ls"
bv = binaryninja.BinaryViewType[bintype].open(target)
bv.update_analysis()
"""Until update_analysis_and_wait is complete, sleep is necessary as the analysis is multi-threaded."""
time.sleep(1)
print "-------- %s --------" % target
print "START: 0x%x" % bv.start
print "ENTRY: 0x%x" % bv.entry_point
print "ARCH: %s" % bv.arch.name
print "\n-------- Function List --------"
""" print all the functions, their basic blocks, and their il instructions """
for func in bv.functions:
print repr(func)
for block in func.low_level_il:
print "\t{0}".format(block)
for insn in block:
print "\t\t{0}".format(insn)
""" print all the functions, their basic blocks, and their mc instructions """
for func in bv.functions:
print repr(func)
for block in func:
print "\t{0}".format(block)
for insn in block:
print "\t\t{0}".format(insn)
|
<commit_before><commit_msg>Add an example that uses the iterators<commit_after>#!/usr/bin/env python
import sys
try:
import binaryninja
except ImportError:
sys.path.append("/Applications/Binary Ninja.app/Contents/Resources/python/")
import binaryninja
import time
if sys.platform.lower().startswith("linux"):
bintype="ELF"
elif sys.platform.lower() == "darwin":
bintype="Mach-O"
else:
raise Exception, "%s is not supported on this plugin" % sys.platform
if len(sys.argv) > 1:
target = sys.argv[1]
else:
target = "/bin/ls"
bv = binaryninja.BinaryViewType[bintype].open(target)
bv.update_analysis()
"""Until update_analysis_and_wait is complete, sleep is necessary as the analysis is multi-threaded."""
time.sleep(1)
print "-------- %s --------" % target
print "START: 0x%x" % bv.start
print "ENTRY: 0x%x" % bv.entry_point
print "ARCH: %s" % bv.arch.name
print "\n-------- Function List --------"
""" print all the functions, their basic blocks, and their il instructions """
for func in bv.functions:
print repr(func)
for block in func.low_level_il:
print "\t{0}".format(block)
for insn in block:
print "\t\t{0}".format(insn)
""" print all the functions, their basic blocks, and their mc instructions """
for func in bv.functions:
print repr(func)
for block in func:
print "\t{0}".format(block)
for insn in block:
print "\t\t{0}".format(insn)
|
|
214c81265db7dc23a805717126fad6f97d391fe8
|
scripts/markers/mark_error_105.py
|
scripts/markers/mark_error_105.py
|
"""Marks all fixed errors #105 on ruwiki's CheckWikipedia."""
import re
import pywikibot
from checkwiki import load_page_list, mark_error_done, log
NUMBER = "105"
def main():
"""Main script function."""
site = pywikibot.Site()
for pagename in load_page_list(NUMBER):
page = pywikibot.Page(site, pagename)
error = False
for line in page.text.split("\n"):
match = re.search(r"==+$", line)
if not match:
continue
if line.startswith(match.group(0)):
continue
error = True
break
if error:
log(pagename, success=False)
else:
mark_error_done(NUMBER, page.title())
log(pagename, success=True)
if __name__ == "__main__":
main()
|
Add marker for 105 error
|
Add marker for 105 error
|
Python
|
mit
|
Facenapalm/NapalmBot
|
Add marker for 105 error
|
"""Marks all fixed errors #105 on ruwiki's CheckWikipedia."""
import re
import pywikibot
from checkwiki import load_page_list, mark_error_done, log
NUMBER = "105"
def main():
"""Main script function."""
site = pywikibot.Site()
for pagename in load_page_list(NUMBER):
page = pywikibot.Page(site, pagename)
error = False
for line in page.text.split("\n"):
match = re.search(r"==+$", line)
if not match:
continue
if line.startswith(match.group(0)):
continue
error = True
break
if error:
log(pagename, success=False)
else:
mark_error_done(NUMBER, page.title())
log(pagename, success=True)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add marker for 105 error<commit_after>
|
"""Marks all fixed errors #105 on ruwiki's CheckWikipedia."""
import re
import pywikibot
from checkwiki import load_page_list, mark_error_done, log
NUMBER = "105"
def main():
"""Main script function."""
site = pywikibot.Site()
for pagename in load_page_list(NUMBER):
page = pywikibot.Page(site, pagename)
error = False
for line in page.text.split("\n"):
match = re.search(r"==+$", line)
if not match:
continue
if line.startswith(match.group(0)):
continue
error = True
break
if error:
log(pagename, success=False)
else:
mark_error_done(NUMBER, page.title())
log(pagename, success=True)
if __name__ == "__main__":
main()
|
Add marker for 105 error"""Marks all fixed errors #105 on ruwiki's CheckWikipedia."""
import re
import pywikibot
from checkwiki import load_page_list, mark_error_done, log
NUMBER = "105"
def main():
"""Main script function."""
site = pywikibot.Site()
for pagename in load_page_list(NUMBER):
page = pywikibot.Page(site, pagename)
error = False
for line in page.text.split("\n"):
match = re.search(r"==+$", line)
if not match:
continue
if line.startswith(match.group(0)):
continue
error = True
break
if error:
log(pagename, success=False)
else:
mark_error_done(NUMBER, page.title())
log(pagename, success=True)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add marker for 105 error<commit_after>"""Marks all fixed errors #105 on ruwiki's CheckWikipedia."""
import re
import pywikibot
from checkwiki import load_page_list, mark_error_done, log
NUMBER = "105"
def main():
"""Main script function."""
site = pywikibot.Site()
for pagename in load_page_list(NUMBER):
page = pywikibot.Page(site, pagename)
error = False
for line in page.text.split("\n"):
match = re.search(r"==+$", line)
if not match:
continue
if line.startswith(match.group(0)):
continue
error = True
break
if error:
log(pagename, success=False)
else:
mark_error_done(NUMBER, page.title())
log(pagename, success=True)
if __name__ == "__main__":
main()
|
|
54c5f4f476cebec063652f5e4c6acd30bf2dee2e
|
nova/tests/unit/cmd/test_cmd_db_blocks.py
|
nova/tests/unit/cmd/test_cmd_db_blocks.py
|
# Copyright 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
from nova.cmd import compute
from nova.cmd import network
from nova import db
from nova import exception
from nova import test
@contextlib.contextmanager
def restore_db():
orig = db.api.IMPL
try:
yield
finally:
db.api.IMPL = orig
class ComputeMainTest(test.NoDBTestCase):
@mock.patch('nova.utils.monkey_patch')
@mock.patch('nova.conductor.api.API.wait_until_ready')
@mock.patch('oslo_reports.guru_meditation_report')
def _call_main(self, mod, gmr, cond, patch):
@mock.patch.object(mod, 'config')
@mock.patch.object(mod, 'service')
def run_main(serv, conf):
mod.main()
run_main()
def test_compute_main_blocks_db(self):
with restore_db():
self._call_main(compute)
self.assertRaises(exception.DBNotAllowed,
db.api.instance_get, 1, 2)
def test_network_main_blocks_db(self):
with restore_db():
self._call_main(network)
self.assertRaises(exception.DBNotAllowed,
db.api.instance_get, 1, 2)
|
Add test for nova-compute and nova-network main database blocks
|
Add test for nova-compute and nova-network main database blocks
We block the database objects when conductor is not local for compute
and network, but we don't test this code anywhere because it's in the
main() function of the actual executable.
Fix that.
Change-Id: I5b9343d30e6b4aedb05f0731ba9bdca51d408ba9
|
Python
|
apache-2.0
|
klmitch/nova,gooddata/openstack-nova,hanlind/nova,mahak/nova,Juniper/nova,phenoxim/nova,vmturbo/nova,phenoxim/nova,mahak/nova,vmturbo/nova,rajalokan/nova,rajalokan/nova,openstack/nova,mikalstill/nova,gooddata/openstack-nova,mikalstill/nova,alaski/nova,sebrandon1/nova,sebrandon1/nova,openstack/nova,alaski/nova,mahak/nova,gooddata/openstack-nova,rahulunair/nova,klmitch/nova,cloudbase/nova,mikalstill/nova,Juniper/nova,Juniper/nova,klmitch/nova,jianghuaw/nova,rajalokan/nova,Juniper/nova,openstack/nova,hanlind/nova,rajalokan/nova,cloudbase/nova,hanlind/nova,jianghuaw/nova,rahulunair/nova,jianghuaw/nova,sebrandon1/nova,vmturbo/nova,cloudbase/nova,vmturbo/nova,gooddata/openstack-nova,rahulunair/nova,klmitch/nova,jianghuaw/nova
|
Add test for nova-compute and nova-network main database blocks
We block the database objects when conductor is not local for compute
and network, but we don't test this code anywhere because it's in the
main() function of the actual executable.
Fix that.
Change-Id: I5b9343d30e6b4aedb05f0731ba9bdca51d408ba9
|
# Copyright 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
from nova.cmd import compute
from nova.cmd import network
from nova import db
from nova import exception
from nova import test
@contextlib.contextmanager
def restore_db():
orig = db.api.IMPL
try:
yield
finally:
db.api.IMPL = orig
class ComputeMainTest(test.NoDBTestCase):
@mock.patch('nova.utils.monkey_patch')
@mock.patch('nova.conductor.api.API.wait_until_ready')
@mock.patch('oslo_reports.guru_meditation_report')
def _call_main(self, mod, gmr, cond, patch):
@mock.patch.object(mod, 'config')
@mock.patch.object(mod, 'service')
def run_main(serv, conf):
mod.main()
run_main()
def test_compute_main_blocks_db(self):
with restore_db():
self._call_main(compute)
self.assertRaises(exception.DBNotAllowed,
db.api.instance_get, 1, 2)
def test_network_main_blocks_db(self):
with restore_db():
self._call_main(network)
self.assertRaises(exception.DBNotAllowed,
db.api.instance_get, 1, 2)
|
<commit_before><commit_msg>Add test for nova-compute and nova-network main database blocks
We block the database objects when conductor is not local for compute
and network, but we don't test this code anywhere because it's in the
main() function of the actual executable.
Fix that.
Change-Id: I5b9343d30e6b4aedb05f0731ba9bdca51d408ba9<commit_after>
|
# Copyright 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
from nova.cmd import compute
from nova.cmd import network
from nova import db
from nova import exception
from nova import test
@contextlib.contextmanager
def restore_db():
orig = db.api.IMPL
try:
yield
finally:
db.api.IMPL = orig
class ComputeMainTest(test.NoDBTestCase):
@mock.patch('nova.utils.monkey_patch')
@mock.patch('nova.conductor.api.API.wait_until_ready')
@mock.patch('oslo_reports.guru_meditation_report')
def _call_main(self, mod, gmr, cond, patch):
@mock.patch.object(mod, 'config')
@mock.patch.object(mod, 'service')
def run_main(serv, conf):
mod.main()
run_main()
def test_compute_main_blocks_db(self):
with restore_db():
self._call_main(compute)
self.assertRaises(exception.DBNotAllowed,
db.api.instance_get, 1, 2)
def test_network_main_blocks_db(self):
with restore_db():
self._call_main(network)
self.assertRaises(exception.DBNotAllowed,
db.api.instance_get, 1, 2)
|
Add test for nova-compute and nova-network main database blocks
We block the database objects when conductor is not local for compute
and network, but we don't test this code anywhere because it's in the
main() function of the actual executable.
Fix that.
Change-Id: I5b9343d30e6b4aedb05f0731ba9bdca51d408ba9# Copyright 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
from nova.cmd import compute
from nova.cmd import network
from nova import db
from nova import exception
from nova import test
@contextlib.contextmanager
def restore_db():
orig = db.api.IMPL
try:
yield
finally:
db.api.IMPL = orig
class ComputeMainTest(test.NoDBTestCase):
@mock.patch('nova.utils.monkey_patch')
@mock.patch('nova.conductor.api.API.wait_until_ready')
@mock.patch('oslo_reports.guru_meditation_report')
def _call_main(self, mod, gmr, cond, patch):
@mock.patch.object(mod, 'config')
@mock.patch.object(mod, 'service')
def run_main(serv, conf):
mod.main()
run_main()
def test_compute_main_blocks_db(self):
with restore_db():
self._call_main(compute)
self.assertRaises(exception.DBNotAllowed,
db.api.instance_get, 1, 2)
def test_network_main_blocks_db(self):
with restore_db():
self._call_main(network)
self.assertRaises(exception.DBNotAllowed,
db.api.instance_get, 1, 2)
|
<commit_before><commit_msg>Add test for nova-compute and nova-network main database blocks
We block the database objects when conductor is not local for compute
and network, but we don't test this code anywhere because it's in the
main() function of the actual executable.
Fix that.
Change-Id: I5b9343d30e6b4aedb05f0731ba9bdca51d408ba9<commit_after># Copyright 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
from nova.cmd import compute
from nova.cmd import network
from nova import db
from nova import exception
from nova import test
@contextlib.contextmanager
def restore_db():
orig = db.api.IMPL
try:
yield
finally:
db.api.IMPL = orig
class ComputeMainTest(test.NoDBTestCase):
@mock.patch('nova.utils.monkey_patch')
@mock.patch('nova.conductor.api.API.wait_until_ready')
@mock.patch('oslo_reports.guru_meditation_report')
def _call_main(self, mod, gmr, cond, patch):
@mock.patch.object(mod, 'config')
@mock.patch.object(mod, 'service')
def run_main(serv, conf):
mod.main()
run_main()
def test_compute_main_blocks_db(self):
with restore_db():
self._call_main(compute)
self.assertRaises(exception.DBNotAllowed,
db.api.instance_get, 1, 2)
def test_network_main_blocks_db(self):
with restore_db():
self._call_main(network)
self.assertRaises(exception.DBNotAllowed,
db.api.instance_get, 1, 2)
|
|
86c13905a616fe74ea1264b3e462ada3ca7b4e04
|
tests/test_clickthrough.py
|
tests/test_clickthrough.py
|
import openliveq as olq
import os
class TestClickthrough(object):
def test_load(self):
filepath = os.path.join(os.path.dirname(__file__),
"fixtures", "sample_clickthrough.tsv")
cs = []
with open(filepath) as f:
for line in f:
c = olq.Clickthrough.readline(line)
cs.append(c)
assert cs[0].query_id == 'OLQ-9998'
assert cs[0].question_id == '1167627151'
assert cs[0].rank == 1
assert cs[0].ctr == 0.5
assert cs[0].male == 0.4
assert cs[0].female == 0.6
assert cs[0].a00 == 0.1
assert cs[0].a10 == 0.1
assert cs[0].a20 == 0.1
assert cs[0].a30 == 0.1
assert cs[0].a40 == 0.1
assert cs[0].a50 == 0.1
assert cs[0].a60 == 0.4
assert cs[2].query_id == 'OLQ-9999'
assert cs[2].question_id == '1414846259'
assert cs[2].rank == 2
assert cs[2].ctr == 0.2
assert cs[2].male == 0.5
assert cs[2].female == 0.5
assert cs[2].a00 == 0.1
assert cs[2].a10 == 0.1
assert cs[2].a20 == 0.1
assert cs[2].a30 == 0.1
assert cs[2].a40 == 0.2
assert cs[2].a50 == 0.2
assert cs[2].a60 == 0.2
|
Add a test for clickthrough
|
Add a test for clickthrough
|
Python
|
mit
|
mpkato/openliveq
|
Add a test for clickthrough
|
import openliveq as olq
import os
class TestClickthrough(object):
def test_load(self):
filepath = os.path.join(os.path.dirname(__file__),
"fixtures", "sample_clickthrough.tsv")
cs = []
with open(filepath) as f:
for line in f:
c = olq.Clickthrough.readline(line)
cs.append(c)
assert cs[0].query_id == 'OLQ-9998'
assert cs[0].question_id == '1167627151'
assert cs[0].rank == 1
assert cs[0].ctr == 0.5
assert cs[0].male == 0.4
assert cs[0].female == 0.6
assert cs[0].a00 == 0.1
assert cs[0].a10 == 0.1
assert cs[0].a20 == 0.1
assert cs[0].a30 == 0.1
assert cs[0].a40 == 0.1
assert cs[0].a50 == 0.1
assert cs[0].a60 == 0.4
assert cs[2].query_id == 'OLQ-9999'
assert cs[2].question_id == '1414846259'
assert cs[2].rank == 2
assert cs[2].ctr == 0.2
assert cs[2].male == 0.5
assert cs[2].female == 0.5
assert cs[2].a00 == 0.1
assert cs[2].a10 == 0.1
assert cs[2].a20 == 0.1
assert cs[2].a30 == 0.1
assert cs[2].a40 == 0.2
assert cs[2].a50 == 0.2
assert cs[2].a60 == 0.2
|
<commit_before><commit_msg>Add a test for clickthrough<commit_after>
|
import openliveq as olq
import os
class TestClickthrough(object):
def test_load(self):
filepath = os.path.join(os.path.dirname(__file__),
"fixtures", "sample_clickthrough.tsv")
cs = []
with open(filepath) as f:
for line in f:
c = olq.Clickthrough.readline(line)
cs.append(c)
assert cs[0].query_id == 'OLQ-9998'
assert cs[0].question_id == '1167627151'
assert cs[0].rank == 1
assert cs[0].ctr == 0.5
assert cs[0].male == 0.4
assert cs[0].female == 0.6
assert cs[0].a00 == 0.1
assert cs[0].a10 == 0.1
assert cs[0].a20 == 0.1
assert cs[0].a30 == 0.1
assert cs[0].a40 == 0.1
assert cs[0].a50 == 0.1
assert cs[0].a60 == 0.4
assert cs[2].query_id == 'OLQ-9999'
assert cs[2].question_id == '1414846259'
assert cs[2].rank == 2
assert cs[2].ctr == 0.2
assert cs[2].male == 0.5
assert cs[2].female == 0.5
assert cs[2].a00 == 0.1
assert cs[2].a10 == 0.1
assert cs[2].a20 == 0.1
assert cs[2].a30 == 0.1
assert cs[2].a40 == 0.2
assert cs[2].a50 == 0.2
assert cs[2].a60 == 0.2
|
Add a test for clickthroughimport openliveq as olq
import os
class TestClickthrough(object):
def test_load(self):
filepath = os.path.join(os.path.dirname(__file__),
"fixtures", "sample_clickthrough.tsv")
cs = []
with open(filepath) as f:
for line in f:
c = olq.Clickthrough.readline(line)
cs.append(c)
assert cs[0].query_id == 'OLQ-9998'
assert cs[0].question_id == '1167627151'
assert cs[0].rank == 1
assert cs[0].ctr == 0.5
assert cs[0].male == 0.4
assert cs[0].female == 0.6
assert cs[0].a00 == 0.1
assert cs[0].a10 == 0.1
assert cs[0].a20 == 0.1
assert cs[0].a30 == 0.1
assert cs[0].a40 == 0.1
assert cs[0].a50 == 0.1
assert cs[0].a60 == 0.4
assert cs[2].query_id == 'OLQ-9999'
assert cs[2].question_id == '1414846259'
assert cs[2].rank == 2
assert cs[2].ctr == 0.2
assert cs[2].male == 0.5
assert cs[2].female == 0.5
assert cs[2].a00 == 0.1
assert cs[2].a10 == 0.1
assert cs[2].a20 == 0.1
assert cs[2].a30 == 0.1
assert cs[2].a40 == 0.2
assert cs[2].a50 == 0.2
assert cs[2].a60 == 0.2
|
<commit_before><commit_msg>Add a test for clickthrough<commit_after>import openliveq as olq
import os
class TestClickthrough(object):
def test_load(self):
filepath = os.path.join(os.path.dirname(__file__),
"fixtures", "sample_clickthrough.tsv")
cs = []
with open(filepath) as f:
for line in f:
c = olq.Clickthrough.readline(line)
cs.append(c)
assert cs[0].query_id == 'OLQ-9998'
assert cs[0].question_id == '1167627151'
assert cs[0].rank == 1
assert cs[0].ctr == 0.5
assert cs[0].male == 0.4
assert cs[0].female == 0.6
assert cs[0].a00 == 0.1
assert cs[0].a10 == 0.1
assert cs[0].a20 == 0.1
assert cs[0].a30 == 0.1
assert cs[0].a40 == 0.1
assert cs[0].a50 == 0.1
assert cs[0].a60 == 0.4
assert cs[2].query_id == 'OLQ-9999'
assert cs[2].question_id == '1414846259'
assert cs[2].rank == 2
assert cs[2].ctr == 0.2
assert cs[2].male == 0.5
assert cs[2].female == 0.5
assert cs[2].a00 == 0.1
assert cs[2].a10 == 0.1
assert cs[2].a20 == 0.1
assert cs[2].a30 == 0.1
assert cs[2].a40 == 0.2
assert cs[2].a50 == 0.2
assert cs[2].a60 == 0.2
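As a side note on the fixture this test reads: the assertions pin down which fields a Clickthrough row carries, but not how sample_clickthrough.tsv is laid out or how readline parses it. The stand-in below is only a hedged sketch, assuming one tab-separated row per line with the columns in the order query_id, question_id, rank, ctr, male, female, a00..a60; it is not the real openliveq implementation.

# Illustrative stand-in only, NOT openliveq.Clickthrough.
from collections import namedtuple

FIELDS = ['query_id', 'question_id', 'rank', 'ctr', 'male', 'female',
          'a00', 'a10', 'a20', 'a30', 'a40', 'a50', 'a60']
Row = namedtuple('Row', FIELDS)

def readline_sketch(line):
    parts = line.rstrip('\n').split('\t')
    # Keep the first two columns as strings, parse rank as int, the rest as float.
    return Row(*(parts[:2] + [int(parts[2])] + [float(p) for p in parts[3:]]))

sample = 'OLQ-9998\t1167627151\t1\t0.5\t0.4\t0.6\t0.1\t0.1\t0.1\t0.1\t0.1\t0.1\t0.4'
row = readline_sketch(sample)
assert row.query_id == 'OLQ-9998' and row.rank == 1 and row.a60 == 0.4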
|
|
8c4e58fac4d1d020ac2da38441067959100690a5
|
yunity/tests/integration/test_python.py
|
yunity/tests/integration/test_python.py
|
from importlib import import_module, reload
from os.path import join as join_path, dirname
from os import walk
from sys import modules
from yunity.utils.tests.abc import BaseTestCase
import yunity
def _path_to_module(path, root_module_path, pysuffix='.py'):
path = path[len(dirname(root_module_path)) + 1:-len(pysuffix)]
path = path.replace('/', '.')
return path
def iter_modules(root_module_path, excludes=None, pysuffix='.py'):
def is_module(_):
return _.endswith(pysuffix) and not _.startswith('__init__')
def is_blacklisted(_):
return excludes and any(_.startswith(exclude) for exclude in excludes)
for root, _, leaves in walk(root_module_path):
for leaf in filter(is_module, leaves):
module = _path_to_module(join_path(root, leaf), root_module_path)
if not is_blacklisted(module):
yield module
def import_or_reload(resource):
module = modules.get(resource)
if module:
return reload(module)
else:
return import_module(resource)
class PythonIsValidTestCase(BaseTestCase):
def test_all_modules_import_cleanly(self):
self.given_data(root_module_path=yunity.__path__[0])
self.given_data(excludes={
            'yunity.resources',  # integration test data files have side-effects
'yunity.tests.integration.test_integration', # integration test runner has side-effects
'yunity.management.commands.create_sample_data', # sample data command has side-effects
})
self.when_importing_modules()
self.then_all_modules_import_cleanly()
def when_importing_modules(self):
self.exception = []
for module in iter_modules(*self.args, **self.kwargs):
try:
import_or_reload(module)
except Exception as e:
self.exception.append((module, e))
def then_all_modules_import_cleanly(self):
for module, exception in self.exception:
self.fail('{} did not import cleanly: {}'.format(module, exception.args[0]))
|
Add expectation that all Python code is correct
|
Add expectation that all Python code is correct
|
Python
|
agpl-3.0
|
yunity/yunity-core,yunity/foodsaving-backend,yunity/foodsaving-backend,yunity/foodsaving-backend,yunity/yunity-core
|
Add expectation that all Python code is correct
|
from importlib import import_module, reload
from os.path import join as join_path, dirname
from os import walk
from sys import modules
from yunity.utils.tests.abc import BaseTestCase
import yunity
def _path_to_module(path, root_module_path, pysuffix='.py'):
path = path[len(dirname(root_module_path)) + 1:-len(pysuffix)]
path = path.replace('/', '.')
return path
def iter_modules(root_module_path, excludes=None, pysuffix='.py'):
def is_module(_):
return _.endswith(pysuffix) and not _.startswith('__init__')
def is_blacklisted(_):
return excludes and any(_.startswith(exclude) for exclude in excludes)
for root, _, leaves in walk(root_module_path):
for leaf in filter(is_module, leaves):
module = _path_to_module(join_path(root, leaf), root_module_path)
if not is_blacklisted(module):
yield module
def import_or_reload(resource):
module = modules.get(resource)
if module:
return reload(module)
else:
return import_module(resource)
class PythonIsValidTestCase(BaseTestCase):
def test_all_modules_import_cleanly(self):
self.given_data(root_module_path=yunity.__path__[0])
self.given_data(excludes={
            'yunity.resources',  # integration test data files have side-effects
'yunity.tests.integration.test_integration', # integration test runner has side-effects
'yunity.management.commands.create_sample_data', # sample data command has side-effects
})
self.when_importing_modules()
self.then_all_modules_import_cleanly()
def when_importing_modules(self):
self.exception = []
for module in iter_modules(*self.args, **self.kwargs):
try:
import_or_reload(module)
except Exception as e:
self.exception.append((module, e))
def then_all_modules_import_cleanly(self):
for module, exception in self.exception:
self.fail('{} did not import cleanly: {}'.format(module, exception.args[0]))
|
<commit_before><commit_msg>Add expectation that all Python code is correct<commit_after>
|
from importlib import import_module, reload
from os.path import join as join_path, dirname
from os import walk
from sys import modules
from yunity.utils.tests.abc import BaseTestCase
import yunity
def _path_to_module(path, root_module_path, pysuffix='.py'):
path = path[len(dirname(root_module_path)) + 1:-len(pysuffix)]
path = path.replace('/', '.')
return path
def iter_modules(root_module_path, excludes=None, pysuffix='.py'):
def is_module(_):
return _.endswith(pysuffix) and not _.startswith('__init__')
def is_blacklisted(_):
return excludes and any(_.startswith(exclude) for exclude in excludes)
for root, _, leaves in walk(root_module_path):
for leaf in filter(is_module, leaves):
module = _path_to_module(join_path(root, leaf), root_module_path)
if not is_blacklisted(module):
yield module
def import_or_reload(resource):
module = modules.get(resource)
if module:
return reload(module)
else:
return import_module(resource)
class PythonIsValidTestCase(BaseTestCase):
def test_all_modules_import_cleanly(self):
self.given_data(root_module_path=yunity.__path__[0])
self.given_data(excludes={
            'yunity.resources',  # integration test data files have side-effects
'yunity.tests.integration.test_integration', # integration test runner has side-effects
'yunity.management.commands.create_sample_data', # sample data command has side-effects
})
self.when_importing_modules()
self.then_all_modules_import_cleanly()
def when_importing_modules(self):
self.exception = []
for module in iter_modules(*self.args, **self.kwargs):
try:
import_or_reload(module)
except Exception as e:
self.exception.append((module, e))
def then_all_modules_import_cleanly(self):
for module, exception in self.exception:
self.fail('{} did not import cleanly: {}'.format(module, exception.args[0]))
|
Add expectation that all Python code is correctfrom importlib import import_module, reload
from os.path import join as join_path, dirname
from os import walk
from sys import modules
from yunity.utils.tests.abc import BaseTestCase
import yunity
def _path_to_module(path, root_module_path, pysuffix='.py'):
path = path[len(dirname(root_module_path)) + 1:-len(pysuffix)]
path = path.replace('/', '.')
return path
def iter_modules(root_module_path, excludes=None, pysuffix='.py'):
def is_module(_):
return _.endswith(pysuffix) and not _.startswith('__init__')
def is_blacklisted(_):
return excludes and any(_.startswith(exclude) for exclude in excludes)
for root, _, leaves in walk(root_module_path):
for leaf in filter(is_module, leaves):
module = _path_to_module(join_path(root, leaf), root_module_path)
if not is_blacklisted(module):
yield module
def import_or_reload(resource):
module = modules.get(resource)
if module:
return reload(module)
else:
return import_module(resource)
class PythonIsValidTestCase(BaseTestCase):
def test_all_modules_import_cleanly(self):
self.given_data(root_module_path=yunity.__path__[0])
self.given_data(excludes={
            'yunity.resources',  # integration test data files have side-effects
'yunity.tests.integration.test_integration', # integration test runner has side-effects
'yunity.management.commands.create_sample_data', # sample data command has side-effects
})
self.when_importing_modules()
self.then_all_modules_import_cleanly()
def when_importing_modules(self):
self.exception = []
for module in iter_modules(*self.args, **self.kwargs):
try:
import_or_reload(module)
except Exception as e:
self.exception.append((module, e))
def then_all_modules_import_cleanly(self):
for module, exception in self.exception:
self.fail('{} did not import cleanly: {}'.format(module, exception.args[0]))
|
<commit_before><commit_msg>Add expectation that all Python code is correct<commit_after>from importlib import import_module, reload
from os.path import join as join_path, dirname
from os import walk
from sys import modules
from yunity.utils.tests.abc import BaseTestCase
import yunity
def _path_to_module(path, root_module_path, pysuffix='.py'):
path = path[len(dirname(root_module_path)) + 1:-len(pysuffix)]
path = path.replace('/', '.')
return path
def iter_modules(root_module_path, excludes=None, pysuffix='.py'):
def is_module(_):
return _.endswith(pysuffix) and not _.startswith('__init__')
def is_blacklisted(_):
return excludes and any(_.startswith(exclude) for exclude in excludes)
for root, _, leaves in walk(root_module_path):
for leaf in filter(is_module, leaves):
module = _path_to_module(join_path(root, leaf), root_module_path)
if not is_blacklisted(module):
yield module
def import_or_reload(resource):
module = modules.get(resource)
if module:
return reload(module)
else:
return import_module(resource)
class PythonIsValidTestCase(BaseTestCase):
def test_all_modules_import_cleanly(self):
self.given_data(root_module_path=yunity.__path__[0])
self.given_data(excludes={
            'yunity.resources',  # integration test data files have side-effects
'yunity.tests.integration.test_integration', # integration test runner has side-effects
'yunity.management.commands.create_sample_data', # sample data command has side-effects
})
self.when_importing_modules()
self.then_all_modules_import_cleanly()
def when_importing_modules(self):
self.exception = []
for module in iter_modules(*self.args, **self.kwargs):
try:
import_or_reload(module)
except Exception as e:
self.exception.append((module, e))
def then_all_modules_import_cleanly(self):
for module, exception in self.exception:
self.fail('{} did not import cleanly: {}'.format(module, exception.args[0]))
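A small worked example of the path-to-module conversion that _path_to_module performs above; the /srv/app/yunity paths are purely illustrative assumptions, not real project paths.

from os.path import dirname

root = '/srv/app/yunity'                      # assumed root_module_path
path = '/srv/app/yunity/utils/tests/abc.py'   # assumed module file under it
# Strip dirname(root) plus the joining slash and the '.py' suffix, then turn
# slashes into dots, exactly as _path_to_module does.
trimmed = path[len(dirname(root)) + 1:-len('.py')]
assert trimmed == 'yunity/utils/tests/abc'
assert trimmed.replace('/', '.') == 'yunity.utils.tests.abc'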
|
|
87fdc8ab59baa989d57c482085d67fb139573313
|
test/test_get_name.py
|
test/test_get_name.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
import unittest2 as unittest
from alignak.objects.arbiterlink import ArbiterLink
from alignak.objects.schedulerlink import SchedulerLink
from alignak.objects.brokerlink import BrokerLink
from alignak.objects.reactionnerlink import ReactionnerLink
from alignak.objects.receiverlink import ReceiverLink
from alignak.objects.pollerlink import PollerLink
class template_DaemonLink_get_name():
def get_link(self):
cls = self.daemon_link
return cls({})
def test_get_name(self):
link = self.get_link()
try:
self.assertEquals("Unnamed {0}".format(self.daemon_link.my_type), link.get_name())
except AttributeError:
self.assertTrue(False, "get_name should not raise AttributeError")
class Test_ArbiterLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ArbiterLink
class Test_SchedulerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = SchedulerLink
class Test_BrokerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = BrokerLink
class Test_ReactionnerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ReactionnerLink
class Test_ReceiverLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ReceiverLink
class Test_PollerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = PollerLink
if __name__ == '__main__':
unittest.main()
|
Test - Trigger AttributeError with a get_name call in daemonlinks
|
Enh: Test - Trigger AttributeError with a get_name call in daemonlinks
|
Python
|
agpl-3.0
|
titilambert/alignak,Alignak-monitoring/alignak,gst/alignak,titilambert/alignak,Alignak-monitoring/alignak,gst/alignak,gst/alignak,titilambert/alignak,titilambert/alignak,gst/alignak
|
Enh: Test - Trigger AttributeError with a get_name call in daemonlinks
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
import unittest2 as unittest
from alignak.objects.arbiterlink import ArbiterLink
from alignak.objects.schedulerlink import SchedulerLink
from alignak.objects.brokerlink import BrokerLink
from alignak.objects.reactionnerlink import ReactionnerLink
from alignak.objects.receiverlink import ReceiverLink
from alignak.objects.pollerlink import PollerLink
class template_DaemonLink_get_name():
def get_link(self):
cls = self.daemon_link
return cls({})
def test_get_name(self):
link = self.get_link()
try:
self.assertEquals("Unnamed {0}".format(self.daemon_link.my_type), link.get_name())
except AttributeError:
self.assertTrue(False, "get_name should not raise AttributeError")
class Test_ArbiterLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ArbiterLink
class Test_SchedulerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = SchedulerLink
class Test_BrokerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = BrokerLink
class Test_ReactionnerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ReactionnerLink
class Test_ReceiverLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ReceiverLink
class Test_PollerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = PollerLink
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Enh: Test - Trigger AttributeError with a get_name call in daemonlinks<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
import unittest2 as unittest
from alignak.objects.arbiterlink import ArbiterLink
from alignak.objects.schedulerlink import SchedulerLink
from alignak.objects.brokerlink import BrokerLink
from alignak.objects.reactionnerlink import ReactionnerLink
from alignak.objects.receiverlink import ReceiverLink
from alignak.objects.pollerlink import PollerLink
class template_DaemonLink_get_name():
def get_link(self):
cls = self.daemon_link
return cls({})
def test_get_name(self):
link = self.get_link()
try:
self.assertEquals("Unnamed {0}".format(self.daemon_link.my_type), link.get_name())
except AttributeError:
self.assertTrue(False, "get_name should not raise AttributeError")
class Test_ArbiterLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ArbiterLink
class Test_SchedulerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = SchedulerLink
class Test_BrokerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = BrokerLink
class Test_ReactionnerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ReactionnerLink
class Test_ReceiverLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ReceiverLink
class Test_PollerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = PollerLink
if __name__ == '__main__':
unittest.main()
|
Enh: Test - Trigger AttributeError with a get_name call in daemonlinks#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
import unittest2 as unittest
from alignak.objects.arbiterlink import ArbiterLink
from alignak.objects.schedulerlink import SchedulerLink
from alignak.objects.brokerlink import BrokerLink
from alignak.objects.reactionnerlink import ReactionnerLink
from alignak.objects.receiverlink import ReceiverLink
from alignak.objects.pollerlink import PollerLink
class template_DaemonLink_get_name():
def get_link(self):
cls = self.daemon_link
return cls({})
def test_get_name(self):
link = self.get_link()
try:
self.assertEquals("Unnamed {0}".format(self.daemon_link.my_type), link.get_name())
except AttributeError:
self.assertTrue(False, "get_name should not raise AttributeError")
class Test_ArbiterLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ArbiterLink
class Test_SchedulerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = SchedulerLink
class Test_BrokerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = BrokerLink
class Test_ReactionnerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ReactionnerLink
class Test_ReceiverLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ReceiverLink
class Test_PollerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = PollerLink
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Enh: Test - Trigger AttributeError with a get_name call in daemonlinks<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
import unittest2 as unittest
from alignak.objects.arbiterlink import ArbiterLink
from alignak.objects.schedulerlink import SchedulerLink
from alignak.objects.brokerlink import BrokerLink
from alignak.objects.reactionnerlink import ReactionnerLink
from alignak.objects.receiverlink import ReceiverLink
from alignak.objects.pollerlink import PollerLink
class template_DaemonLink_get_name():
def get_link(self):
cls = self.daemon_link
return cls({})
def test_get_name(self):
link = self.get_link()
try:
self.assertEquals("Unnamed {0}".format(self.daemon_link.my_type), link.get_name())
except AttributeError:
self.assertTrue(False, "get_name should not raise AttributeError")
class Test_ArbiterLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ArbiterLink
class Test_SchedulerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = SchedulerLink
class Test_BrokerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = BrokerLink
class Test_ReactionnerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ReactionnerLink
class Test_ReceiverLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = ReceiverLink
class Test_PollerLink_get_name(template_DaemonLink_get_name, unittest.TestCase):
daemon_link = PollerLink
if __name__ == '__main__':
unittest.main()
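For context, a hedged sketch of the kind of get_name behaviour this test is pushing for: fall back to an 'Unnamed <type>' label instead of raising AttributeError when the link has no name yet. This is an illustration only, not the actual Alignak link code.

# Illustrative only -- not the real daemon link implementation.
class DaemonLinkSketch(object):
    my_type = 'arbiter'   # each real link class defines its own my_type

    def get_name(self):
        # getattr with a default avoids the AttributeError the test guards against
        return getattr(self, 'name', 'Unnamed {0}'.format(self.my_type))

assert DaemonLinkSketch().get_name() == 'Unnamed arbiter'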
|
|
d0f92caf504e78a3fd7257ac9fab1fbd9c039212
|
distarray/tests/test_testing.py
|
distarray/tests/test_testing.py
|
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
import unittest
from distarray import testing
class TestRaiseTypeError(unittest.TestCase):
def test_good_dim_dict(self):
dim_dict = {}
success, msg = testing.validate_dim_dict(3, dim_dict)
self.assertTrue(success)
def test_good_bad_dim_dict(self):
dim_dict = {'dist_type': 'b'}
with self.assertRaises(TypeError):
testing.validate_dim_dict(3, dim_dict)
def test_good_dim_data(self):
dim_data = ({}, {}, {})
success, msg = testing.validate_dim_data(dim_data)
self.assertTrue(success)
def test_good_bad_dim_data(self):
dim_data = ({'dist_type': 'b'}, {}, {})
with self.assertRaises(TypeError):
testing.validate_dim_data(dim_data)
def test_good_distbuffer(self):
dim_data = ({},)
distbuffer = dict(__version__='0.10.0',
buffer=bytearray([1,2,3,4]),
dim_data=dim_data)
success, msg = testing.validate_distbuffer(distbuffer)
self.assertTrue(success)
def test_bad_distbuffer(self):
dim_data = ({},)
distbuffer = dict(__venison__='0.10.0',
biffer=bytearray([1,2,3,4]),
dim_doodle=dim_data)
with self.assertRaises(TypeError):
testing.validate_distbuffer(distbuffer)
|
Add simple tests for DAP validator wrappers.
|
Add simple tests for DAP validator wrappers.
Incidentally, add the test file `distarray/tests/test_testing.py`.
|
Python
|
bsd-3-clause
|
enthought/distarray,RaoUmer/distarray,enthought/distarray,RaoUmer/distarray
|
Add simple tests for DAP validator wrappers.
Incidentally, add the test file `distarray/tests/test_testing.py`.
|
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
import unittest
from distarray import testing
class TestRaiseTypeError(unittest.TestCase):
def test_good_dim_dict(self):
dim_dict = {}
success, msg = testing.validate_dim_dict(3, dim_dict)
self.assertTrue(success)
def test_good_bad_dim_dict(self):
dim_dict = {'dist_type': 'b'}
with self.assertRaises(TypeError):
testing.validate_dim_dict(3, dim_dict)
def test_good_dim_data(self):
dim_data = ({}, {}, {})
success, msg = testing.validate_dim_data(dim_data)
self.assertTrue(success)
def test_good_bad_dim_data(self):
dim_data = ({'dist_type': 'b'}, {}, {})
with self.assertRaises(TypeError):
testing.validate_dim_data(dim_data)
def test_good_distbuffer(self):
dim_data = ({},)
distbuffer = dict(__version__='0.10.0',
buffer=bytearray([1,2,3,4]),
dim_data=dim_data)
success, msg = testing.validate_distbuffer(distbuffer)
self.assertTrue(success)
def test_bad_distbuffer(self):
dim_data = ({},)
distbuffer = dict(__venison__='0.10.0',
biffer=bytearray([1,2,3,4]),
dim_doodle=dim_data)
with self.assertRaises(TypeError):
testing.validate_distbuffer(distbuffer)
|
<commit_before><commit_msg>Add simple tests for DAP validator wrappers.
Incidentally, add the test file `distarray/tests/test_testing.py`.<commit_after>
|
# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
import unittest
from distarray import testing
class TestRaiseTypeError(unittest.TestCase):
def test_good_dim_dict(self):
dim_dict = {}
success, msg = testing.validate_dim_dict(3, dim_dict)
self.assertTrue(success)
def test_good_bad_dim_dict(self):
dim_dict = {'dist_type': 'b'}
with self.assertRaises(TypeError):
testing.validate_dim_dict(3, dim_dict)
def test_good_dim_data(self):
dim_data = ({}, {}, {})
success, msg = testing.validate_dim_data(dim_data)
self.assertTrue(success)
def test_good_bad_dim_data(self):
dim_data = ({'dist_type': 'b'}, {}, {})
with self.assertRaises(TypeError):
testing.validate_dim_data(dim_data)
def test_good_distbuffer(self):
dim_data = ({},)
distbuffer = dict(__version__='0.10.0',
buffer=bytearray([1,2,3,4]),
dim_data=dim_data)
success, msg = testing.validate_distbuffer(distbuffer)
self.assertTrue(success)
def test_bad_distbuffer(self):
dim_data = ({},)
distbuffer = dict(__venison__='0.10.0',
biffer=bytearray([1,2,3,4]),
dim_doodle=dim_data)
with self.assertRaises(TypeError):
testing.validate_distbuffer(distbuffer)
|
Add simple tests for DAP validator wrappers.
Incidentally, add the test file `distarray/tests/test_testing.py`.# encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
import unittest
from distarray import testing
class TestRaiseTypeError(unittest.TestCase):
def test_good_dim_dict(self):
dim_dict = {}
success, msg = testing.validate_dim_dict(3, dim_dict)
self.assertTrue(success)
def test_good_bad_dim_dict(self):
dim_dict = {'dist_type': 'b'}
with self.assertRaises(TypeError):
testing.validate_dim_dict(3, dim_dict)
def test_good_dim_data(self):
dim_data = ({}, {}, {})
success, msg = testing.validate_dim_data(dim_data)
self.assertTrue(success)
def test_good_bad_dim_data(self):
dim_data = ({'dist_type': 'b'}, {}, {})
with self.assertRaises(TypeError):
testing.validate_dim_data(dim_data)
def test_good_distbuffer(self):
dim_data = ({},)
distbuffer = dict(__version__='0.10.0',
buffer=bytearray([1,2,3,4]),
dim_data=dim_data)
success, msg = testing.validate_distbuffer(distbuffer)
self.assertTrue(success)
def test_bad_distbuffer(self):
dim_data = ({},)
distbuffer = dict(__venison__='0.10.0',
biffer=bytearray([1,2,3,4]),
dim_doodle=dim_data)
with self.assertRaises(TypeError):
testing.validate_distbuffer(distbuffer)
|
<commit_before><commit_msg>Add simple tests for DAP validator wrappers.
Incidentally, add the test file `distarray/tests/test_testing.py`.<commit_after># encoding: utf-8
# ---------------------------------------------------------------------------
# Copyright (C) 2008-2014, IPython Development Team and Enthought, Inc.
# Distributed under the terms of the BSD License. See COPYING.rst.
# ---------------------------------------------------------------------------
import unittest
from distarray import testing
class TestRaiseTypeError(unittest.TestCase):
def test_good_dim_dict(self):
dim_dict = {}
success, msg = testing.validate_dim_dict(3, dim_dict)
self.assertTrue(success)
def test_good_bad_dim_dict(self):
dim_dict = {'dist_type': 'b'}
with self.assertRaises(TypeError):
testing.validate_dim_dict(3, dim_dict)
def test_good_dim_data(self):
dim_data = ({}, {}, {})
success, msg = testing.validate_dim_data(dim_data)
self.assertTrue(success)
def test_good_bad_dim_data(self):
dim_data = ({'dist_type': 'b'}, {}, {})
with self.assertRaises(TypeError):
testing.validate_dim_data(dim_data)
def test_good_distbuffer(self):
dim_data = ({},)
distbuffer = dict(__version__='0.10.0',
buffer=bytearray([1,2,3,4]),
dim_data=dim_data)
success, msg = testing.validate_distbuffer(distbuffer)
self.assertTrue(success)
def test_bad_distbuffer(self):
dim_data = ({},)
distbuffer = dict(__venison__='0.10.0',
biffer=bytearray([1,2,3,4]),
dim_doodle=dim_data)
with self.assertRaises(TypeError):
testing.validate_distbuffer(distbuffer)
|
|
6dfa189bdab536ecfa2c14e4893017363923ee6a
|
bayes.py
|
bayes.py
|
import numpy as np
import cv2
# pos and neg are positive and negative instances
# each is a list of files of nparray dumps,
# nparray of BoW histograms; shape = (n, 101)
# of the class to be trained for
def build_trained_classifier(pos_files, neg_files):
total = len(pos_files) + len(neg_files)
samples = np.empty((total, 101), np.float32)
i = 0
for pos_file in pos_files:
samples[i] = np.load(pos_file)
i = i + 1
for neg_file in neg_files:
samples[i] = np.load(neg_file)
i = i + 1
labels = np.empty((total, 1), np.float32)
labels[0:len(pos_files), 0] = 1.0
labels[len(pos_files):, 0] = 0.0
return cv2.NormalBayesClassifier(samples, labels)
|
Implement Naive Bayes Classifier builder method
|
Implement Naive Bayes Classifier builder method
|
Python
|
mit
|
ah450/ObjectRecognizer
|
Implement Naive Bayes Classifier builder method
|
import numpy as np
import cv2
# pos and neg are positive and negative instances
# each is a list of files of nparray dumps,
# nparray of BoW histograms; shape = (n, 101)
# of the class to be trained for
def build_trained_classifier(pos_files, neg_files):
total = len(pos_files) + len(neg_files)
samples = np.empty((total, 101), np.float32)
i = 0
for pos_file in pos_files:
samples[i] = np.load(pos_file)
i = i + 1
for neg_file in neg_files:
samples[i] = np.load(neg_file)
i = i + 1
labels = np.empty((total, 1), np.float32)
labels[0:len(pos_files), 0] = 1.0
labels[len(pos_files):, 0] = 0.0
return cv2.NormalBayesClassifier(samples, labels)
|
<commit_before><commit_msg>Implement Naive Bayes Classifier builder method<commit_after>
|
import numpy as np
import cv2
# pos and neg are positive and negative instances
# each is a list of files of nparray dumps,
# nparray of BoW histograms; shape = (n, 101)
# of the class to be trained for
def build_trained_classifier(pos_files, neg_files):
total = len(pos_files) + len(neg_files)
samples = np.empty((total, 101), np.float32)
i = 0
for pos_file in pos_files:
samples[i] = np.load(pos_file)
i = i + 1
for neg_file in neg_files:
samples[i] = np.load(neg_file)
i = i + 1
labels = np.empty((total, 1), np.float32)
labels[0:len(pos_files), 0] = 1.0
labels[len(pos_files):, 0] = 0.0
return cv2.NormalBayesClassifier(samples, labels)
|
Implement Naive Bayes Classifier builder methodimport numpy as np
import cv2
# pos and neg are positive and negative instances
# each is a list of files of nparray dumps,
# nparray of BoW histograms; shape = (n, 101)
# of the class to be trained for
def build_trained_classifier(pos_files, neg_files):
total = len(pos_files) + len(neg_files)
samples = np.empty((total, 101), np.float32)
i = 0
for pos_file in pos_files:
samples[i] = np.load(pos_file)
i = i + 1
for neg_file in neg_files:
samples[i] = np.load(neg_file)
i = i + 1
labels = np.empty((total, 1), np.float32)
labels[0:len(pos_files), 0] = 1.0
labels[len(pos_files):, 0] = 0.0
return cv2.NormalBayesClassifier(samples, labels)
|
<commit_before><commit_msg>Implement Naive Bayes Classifier builder method<commit_after>import numpy as np
import cv2
# pos and neg are positive and negative instances
# each is a list of files of nparray dumps,
# nparray of BoW histograms; shape = (n, 101)
# of the class to be trained for
def build_trained_classifier(pos_files, neg_files):
total = len(pos_files) + len(neg_files)
samples = np.empty((total, 101), np.float32)
i = 0
for pos_file in pos_files:
samples[i] = np.load(pos_file)
i = i + 1
for neg_file in neg_files:
samples[i] = np.load(neg_file)
i = i + 1
labels = np.empty((total, 1), np.float32)
labels[0:len(pos_files), 0] = 1.0
labels[len(pos_files):, 0] = 0.0
return cv2.NormalBayesClassifier(samples, labels)
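A hedged usage sketch for the builder above, following the input format described in its comments. The file names are hypothetical, and the prediction call assumes the OpenCV 2.x NormalBayesClassifier API in which predict returns a (retval, results) pair; check against the installed OpenCV version before relying on it.

# Hypothetical .npy files, each holding one 101-dimensional BoW histogram.
pos_files = ['pos_hist_0.npy', 'pos_hist_1.npy']
neg_files = ['neg_hist_0.npy', 'neg_hist_1.npy']

classifier = build_trained_classifier(pos_files, neg_files)

# Classify a new histogram; input must be float32 with shape (1, 101).
query = np.zeros((1, 101), np.float32)
retval, results = classifier.predict(query)
label = results[0][0]   # 1.0 = positive class, 0.0 = negative class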
|
|
71b6246dda3e4812490a5c2936eac44e063806c0
|
tests/test_sonify.py
|
tests/test_sonify.py
|
""" Unit tests for sonification methods """
import mir_eval
import numpy as np
def test_clicks():
# Test output length for a variety of parameter settings
for times in [np.array([1.]), np.arange(10)*1.]:
for fs in [8000, 44100]:
click_signal = mir_eval.sonify.clicks(times, fs)
assert len(click_signal) == times.max()*fs + int(fs*.1) + 1
click_signal = mir_eval.sonify.clicks(times, fs, length=1000)
assert len(click_signal) == 1000
click_signal = mir_eval.sonify.clicks(
times, fs, click=np.zeros(1000))
assert len(click_signal) == times.max()*fs + 1000 + 1
def test_time_frequency():
# Test length for different inputs
for fs in [8000, 44100]:
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs, length=fs*11)
assert len(signal) == 11*fs
def test_chroma():
for fs in [8000, 44100]:
signal = mir_eval.sonify.chroma(
np.random.standard_normal((12, 1000)),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
def test_chords():
for fs in [8000, 44100]:
intervals = np.array([np.arange(10), np.arange(1, 11)]).T
signal = mir_eval.sonify.chords(
['C', 'C:maj', 'D:min7', 'E:min', 'C#', 'C', 'C', 'C', 'C', 'C'],
intervals, fs)
assert len(signal) == 10*fs
|
Add tests for sonify submodule
|
Add tests for sonify submodule
|
Python
|
mit
|
faroit/mir_eval,faroit/mir_eval,bmcfee/mir_eval,craffel/mir_eval,bmcfee/mir_eval,rabitt/mir_eval,craffel/mir_eval,rabitt/mir_eval
|
Add tests for sonify submodule
|
""" Unit tests for sonification methods """
import mir_eval
import numpy as np
def test_clicks():
# Test output length for a variety of parameter settings
for times in [np.array([1.]), np.arange(10)*1.]:
for fs in [8000, 44100]:
click_signal = mir_eval.sonify.clicks(times, fs)
assert len(click_signal) == times.max()*fs + int(fs*.1) + 1
click_signal = mir_eval.sonify.clicks(times, fs, length=1000)
assert len(click_signal) == 1000
click_signal = mir_eval.sonify.clicks(
times, fs, click=np.zeros(1000))
assert len(click_signal) == times.max()*fs + 1000 + 1
def test_time_frequency():
# Test length for different inputs
for fs in [8000, 44100]:
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs, length=fs*11)
assert len(signal) == 11*fs
def test_chroma():
for fs in [8000, 44100]:
signal = mir_eval.sonify.chroma(
np.random.standard_normal((12, 1000)),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
def test_chords():
for fs in [8000, 44100]:
intervals = np.array([np.arange(10), np.arange(1, 11)]).T
signal = mir_eval.sonify.chords(
['C', 'C:maj', 'D:min7', 'E:min', 'C#', 'C', 'C', 'C', 'C', 'C'],
intervals, fs)
assert len(signal) == 10*fs
|
<commit_before><commit_msg>Add tests for sonify submodule<commit_after>
|
""" Unit tests for sonification methods """
import mir_eval
import numpy as np
def test_clicks():
# Test output length for a variety of parameter settings
for times in [np.array([1.]), np.arange(10)*1.]:
for fs in [8000, 44100]:
click_signal = mir_eval.sonify.clicks(times, fs)
assert len(click_signal) == times.max()*fs + int(fs*.1) + 1
click_signal = mir_eval.sonify.clicks(times, fs, length=1000)
assert len(click_signal) == 1000
click_signal = mir_eval.sonify.clicks(
times, fs, click=np.zeros(1000))
assert len(click_signal) == times.max()*fs + 1000 + 1
def test_time_frequency():
# Test length for different inputs
for fs in [8000, 44100]:
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs, length=fs*11)
assert len(signal) == 11*fs
def test_chroma():
for fs in [8000, 44100]:
signal = mir_eval.sonify.chroma(
np.random.standard_normal((12, 1000)),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
def test_chords():
for fs in [8000, 44100]:
intervals = np.array([np.arange(10), np.arange(1, 11)]).T
signal = mir_eval.sonify.chords(
['C', 'C:maj', 'D:min7', 'E:min', 'C#', 'C', 'C', 'C', 'C', 'C'],
intervals, fs)
assert len(signal) == 10*fs
|
Add tests for sonify submodule""" Unit tests for sonification methods """
import mir_eval
import numpy as np
def test_clicks():
# Test output length for a variety of parameter settings
for times in [np.array([1.]), np.arange(10)*1.]:
for fs in [8000, 44100]:
click_signal = mir_eval.sonify.clicks(times, fs)
assert len(click_signal) == times.max()*fs + int(fs*.1) + 1
click_signal = mir_eval.sonify.clicks(times, fs, length=1000)
assert len(click_signal) == 1000
click_signal = mir_eval.sonify.clicks(
times, fs, click=np.zeros(1000))
assert len(click_signal) == times.max()*fs + 1000 + 1
def test_time_frequency():
# Test length for different inputs
for fs in [8000, 44100]:
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs, length=fs*11)
assert len(signal) == 11*fs
def test_chroma():
for fs in [8000, 44100]:
signal = mir_eval.sonify.chroma(
np.random.standard_normal((12, 1000)),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
def test_chords():
for fs in [8000, 44100]:
intervals = np.array([np.arange(10), np.arange(1, 11)]).T
signal = mir_eval.sonify.chords(
['C', 'C:maj', 'D:min7', 'E:min', 'C#', 'C', 'C', 'C', 'C', 'C'],
intervals, fs)
assert len(signal) == 10*fs
|
<commit_before><commit_msg>Add tests for sonify submodule<commit_after>""" Unit tests for sonification methods """
import mir_eval
import numpy as np
def test_clicks():
# Test output length for a variety of parameter settings
for times in [np.array([1.]), np.arange(10)*1.]:
for fs in [8000, 44100]:
click_signal = mir_eval.sonify.clicks(times, fs)
assert len(click_signal) == times.max()*fs + int(fs*.1) + 1
click_signal = mir_eval.sonify.clicks(times, fs, length=1000)
assert len(click_signal) == 1000
click_signal = mir_eval.sonify.clicks(
times, fs, click=np.zeros(1000))
assert len(click_signal) == times.max()*fs + 1000 + 1
def test_time_frequency():
# Test length for different inputs
for fs in [8000, 44100]:
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
signal = mir_eval.sonify.time_frequency(
np.random.standard_normal((100, 1000)), np.arange(1, 101),
np.linspace(0, 10, 1000), fs, length=fs*11)
assert len(signal) == 11*fs
def test_chroma():
for fs in [8000, 44100]:
signal = mir_eval.sonify.chroma(
np.random.standard_normal((12, 1000)),
np.linspace(0, 10, 1000), fs)
assert len(signal) == 10*fs
def test_chords():
for fs in [8000, 44100]:
intervals = np.array([np.arange(10), np.arange(1, 11)]).T
signal = mir_eval.sonify.chords(
['C', 'C:maj', 'D:min7', 'E:min', 'C#', 'C', 'C', 'C', 'C', 'C'],
intervals, fs)
assert len(signal) == 10*fs
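Beyond length checks, it can help to listen to the output. A hedged sketch of rendering one sonification to a .wav file: scipy is an extra dependency assumed here, and the int16 scaling is a common convention rather than anything mir_eval itself requires.

# Illustrative only: write the click sonification out for manual inspection.
import numpy as np
import scipy.io.wavfile
import mir_eval

fs = 8000
times = np.arange(10) * 1.0                    # one click per second
signal = mir_eval.sonify.clicks(times, fs)
# Normalise and scale into the int16 range before writing.
scaled = (signal / (np.abs(signal).max() + 1e-12) * 32767).astype(np.int16)
scipy.io.wavfile.write('clicks.wav', fs, scaled)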
|
|
f46361d1009665e87543b56d69212b04b9b14993
|
VehicleDetectionTracking/histo_colors.py
|
VehicleDetectionTracking/histo_colors.py
|
# Code given by Udacity, completed by Andres Guijarro
# Purpose: Define a function to compute color histogram features
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('cutout1.jpg')
# Define a function to compute color histogram features
def color_hist(img, nbins=32, bins_range=(0, 256)):
# Compute the histogram of the RGB channels separately
    rhist = np.histogram(img[:, :, 0], bins=nbins, range=bins_range)
    ghist = np.histogram(img[:, :, 1], bins=nbins, range=bins_range)
    bhist = np.histogram(img[:, :, 2], bins=nbins, range=bins_range)
# Generating bin centers
bin_edges = rhist[1]
bin_centers = (bin_edges[1:] + bin_edges[0:len(bin_edges) - 1]) / 2
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((rhist[0], ghist[0], bhist[0]))
# Return the individual histograms, bin_centers and feature vector
return rhist, ghist, bhist, bin_centers, hist_features
def main():
rh, gh, bh, bincen, feature_vec = color_hist(image,
nbins=32,
bins_range=(0, 256))
# Plot a figure with all three bar charts
if rh is not None:
fig = plt.figure(figsize=(12, 3))
plt.subplot(131)
plt.bar(bincen, rh[0])
plt.xlim(0, 256)
plt.title('R Histogram')
plt.subplot(132)
plt.bar(bincen, gh[0])
plt.xlim(0, 256)
plt.title('G Histogram')
plt.subplot(133)
plt.bar(bincen, bh[0])
plt.xlim(0, 256)
plt.title('B Histogram')
fig.tight_layout()
plt.show()
else:
print('Your function is returning None for at least one variable...')
if __name__ == '__main__':
main()
|
Add scripts which define a function to compute color histogram features
|
feat: Add scripts which define a function to compute color histogram features
|
Python
|
mit
|
aguijarro/SelfDrivingCar
|
feat: Add scripts which define a function to compute color histogram features
|
# Code given by Udacity, completed by Andres Guijarro
# Purpose: Define a function to compute color histogram features
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('cutout1.jpg')
# Define a function to compute color histogram features
def color_hist(img, nbins=32, bins_range=(0, 256)):
# Compute the histogram of the RGB channels separately
    rhist = np.histogram(img[:, :, 0], bins=nbins, range=bins_range)
    ghist = np.histogram(img[:, :, 1], bins=nbins, range=bins_range)
    bhist = np.histogram(img[:, :, 2], bins=nbins, range=bins_range)
# Generating bin centers
bin_edges = rhist[1]
bin_centers = (bin_edges[1:] + bin_edges[0:len(bin_edges) - 1]) / 2
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((rhist[0], ghist[0], bhist[0]))
# Return the individual histograms, bin_centers and feature vector
return rhist, ghist, bhist, bin_centers, hist_features
def main():
rh, gh, bh, bincen, feature_vec = color_hist(image,
nbins=32,
bins_range=(0, 256))
# Plot a figure with all three bar charts
if rh is not None:
fig = plt.figure(figsize=(12, 3))
plt.subplot(131)
plt.bar(bincen, rh[0])
plt.xlim(0, 256)
plt.title('R Histogram')
plt.subplot(132)
plt.bar(bincen, gh[0])
plt.xlim(0, 256)
plt.title('G Histogram')
plt.subplot(133)
plt.bar(bincen, bh[0])
plt.xlim(0, 256)
plt.title('B Histogram')
fig.tight_layout()
plt.show()
else:
print('Your function is returning None for at least one variable...')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>feat: Add scripts which define a function to compute color histogram features<commit_after>
|
# Code given by Udacity, completed by Andres Guijarro
# Purpose: Define a function to compute color histogram features
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('cutout1.jpg')
# Define a function to compute color histogram features
def color_hist(img, nbins=32, bins_range=(0, 256)):
# Compute the histogram of the RGB channels separately
    rhist = np.histogram(img[:, :, 0], bins=nbins, range=bins_range)
    ghist = np.histogram(img[:, :, 1], bins=nbins, range=bins_range)
    bhist = np.histogram(img[:, :, 2], bins=nbins, range=bins_range)
# Generating bin centers
bin_edges = rhist[1]
bin_centers = (bin_edges[1:] + bin_edges[0:len(bin_edges) - 1]) / 2
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((rhist[0], ghist[0], bhist[0]))
# Return the individual histograms, bin_centers and feature vector
return rhist, ghist, bhist, bin_centers, hist_features
def main():
rh, gh, bh, bincen, feature_vec = color_hist(image,
nbins=32,
bins_range=(0, 256))
# Plot a figure with all three bar charts
if rh is not None:
fig = plt.figure(figsize=(12, 3))
plt.subplot(131)
plt.bar(bincen, rh[0])
plt.xlim(0, 256)
plt.title('R Histogram')
plt.subplot(132)
plt.bar(bincen, gh[0])
plt.xlim(0, 256)
plt.title('G Histogram')
plt.subplot(133)
plt.bar(bincen, bh[0])
plt.xlim(0, 256)
plt.title('B Histogram')
fig.tight_layout()
plt.show()
else:
print('Your function is returning None for at least one variable...')
if __name__ == '__main__':
main()
|
feat: Add scripts which define a function to compute color histogram features# Code given by Udacity, completed by Andres Guijarro
# Purpose: Define a function to compute color histogram features
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('cutout1.jpg')
# Define a function to compute color histogram features
def color_hist(img, nbins=32, bins_range=(0, 256)):
# Compute the histogram of the RGB channels separately
    rhist = np.histogram(img[:, :, 0], bins=nbins, range=bins_range)
    ghist = np.histogram(img[:, :, 1], bins=nbins, range=bins_range)
    bhist = np.histogram(img[:, :, 2], bins=nbins, range=bins_range)
# Generating bin centers
bin_edges = rhist[1]
bin_centers = (bin_edges[1:] + bin_edges[0:len(bin_edges) - 1]) / 2
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((rhist[0], ghist[0], bhist[0]))
# Return the individual histograms, bin_centers and feature vector
return rhist, ghist, bhist, bin_centers, hist_features
def main():
rh, gh, bh, bincen, feature_vec = color_hist(image,
nbins=32,
bins_range=(0, 256))
# Plot a figure with all three bar charts
if rh is not None:
fig = plt.figure(figsize=(12, 3))
plt.subplot(131)
plt.bar(bincen, rh[0])
plt.xlim(0, 256)
plt.title('R Histogram')
plt.subplot(132)
plt.bar(bincen, gh[0])
plt.xlim(0, 256)
plt.title('G Histogram')
plt.subplot(133)
plt.bar(bincen, bh[0])
plt.xlim(0, 256)
plt.title('B Histogram')
fig.tight_layout()
plt.show()
else:
print('Your function is returning None for at least one variable...')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>feat: Add scripts which define a function to compute color histogram features<commit_after># Code given by Udacity, completed by Andres Guijarro
# Purpose: Define a function to compute color histogram features
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('cutout1.jpg')
# Define a function to compute color histogram features
def color_hist(img, nbins=32, bins_range=(0, 256)):
# Compute the histogram of the RGB channels separately
    rhist = np.histogram(img[:, :, 0], bins=nbins, range=bins_range)
    ghist = np.histogram(img[:, :, 1], bins=nbins, range=bins_range)
    bhist = np.histogram(img[:, :, 2], bins=nbins, range=bins_range)
# Generating bin centers
bin_edges = rhist[1]
bin_centers = (bin_edges[1:] + bin_edges[0:len(bin_edges) - 1]) / 2
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((rhist[0], ghist[0], bhist[0]))
# Return the individual histograms, bin_centers and feature vector
return rhist, ghist, bhist, bin_centers, hist_features
def main():
rh, gh, bh, bincen, feature_vec = color_hist(image,
nbins=32,
bins_range=(0, 256))
# Plot a figure with all three bar charts
if rh is not None:
fig = plt.figure(figsize=(12, 3))
plt.subplot(131)
plt.bar(bincen, rh[0])
plt.xlim(0, 256)
plt.title('R Histogram')
plt.subplot(132)
plt.bar(bincen, gh[0])
plt.xlim(0, 256)
plt.title('G Histogram')
plt.subplot(133)
plt.bar(bincen, bh[0])
plt.xlim(0, 256)
plt.title('B Histogram')
fig.tight_layout()
plt.show()
else:
print('Your function is returning None for at least one variable...')
if __name__ == '__main__':
main()
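One quick arithmetic note on the feature vector built above: with nbins=32 and three channels, hist_features has length 96. The snippet below verifies that using a synthetic RGB image rather than the cutout, so the check does not depend on the photo's contents.

# Sanity check: 32 bins per channel x 3 channels = 96 features.
synthetic = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
rh2, gh2, bh2, centers, features = color_hist(synthetic, nbins=32, bins_range=(0, 256))
assert features.shape == (96,)
assert len(centers) == 32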
|
|
a2e18f9b10e5e6bbcad6c13cdc5c76047d319fc2
|
python/tests/test_pv_composite_wavelet.py
|
python/tests/test_pv_composite_wavelet.py
|
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
outputDir = '/Users/seb/Desktop/float-image/'
# -----------------------------------------------------------------------------
from paraview import simple
from tonic.paraview.dataset_builder import *
# -----------------------------------------------------------------------------
# VTK Pipeline creation
# -----------------------------------------------------------------------------
wavelet = simple.Wavelet()
calc = simple.Calculator()
calc.Function = 'coordsX'
calc.ResultArrayName = 'x'
contour = simple.Contour(
PointMergeMethod="Uniform Binning",
ComputeScalars = 1,
ComputeNormals = 1,
Isosurfaces = 157.09,
ContourBy = ['POINTS', 'RTData'])
clip = simple.Clip()
clip.ClipType.Normal = [0.0, 0.0, -1.0]
# -----------------------------------------------------------------------------
# Data To Export
# -----------------------------------------------------------------------------
layerMesh = {
'core 1': False,
'core 2': True,
'core 3': True,
'core 4': True,
'core 5': True
}
fields = ['RTData', 'x']
cores = ['core 1', 'core 2', 'core 3', 'core 4', 'core 5']
isoValues = [ 77.26, 117.18, 157.09, 197.0, 236.92 ]
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
db = LayerDataSetBuilder(clip, outputDir, {'type': 'spherical', 'phi': range(-10, 11, 10), 'theta': range(-10, 11, 10)}, [400,400])
# Setup view with camera position
view = db.getView()
simple.Show(wavelet, view)
simple.Render(view)
simple.ResetCamera(view)
simple.Hide(wavelet, view)
db.start()
layerIdx = 0
for layer in cores:
# Select only one layer
contour.Isosurfaces = isoValues[layerIdx]
# Capture each field of each layer
for field in fields:
db.setActiveLayer(layer, field, layerMesh[layer])
db.writeLayerData()
# Move to the next layer
layerIdx += 1
db.stop()
|
Add fake limited composite example
|
Add fake limited composite example
|
Python
|
bsd-3-clause
|
Kitware/tonic-data-generator,Kitware/tonic-data-generator
|
Add fake limited composite example
|
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
outputDir = '/Users/seb/Desktop/float-image/'
# -----------------------------------------------------------------------------
from paraview import simple
from tonic.paraview.dataset_builder import *
# -----------------------------------------------------------------------------
# VTK Pipeline creation
# -----------------------------------------------------------------------------
wavelet = simple.Wavelet()
calc = simple.Calculator()
calc.Function = 'coordsX'
calc.ResultArrayName = 'x'
contour = simple.Contour(
PointMergeMethod="Uniform Binning",
ComputeScalars = 1,
ComputeNormals = 1,
Isosurfaces = 157.09,
ContourBy = ['POINTS', 'RTData'])
clip = simple.Clip()
clip.ClipType.Normal = [0.0, 0.0, -1.0]
# -----------------------------------------------------------------------------
# Data To Export
# -----------------------------------------------------------------------------
layerMesh = {
'core 1': False,
'core 2': True,
'core 3': True,
'core 4': True,
'core 5': True
}
fields = ['RTData', 'x']
cores = ['core 1', 'core 2', 'core 3', 'core 4', 'core 5']
isoValues = [ 77.26, 117.18, 157.09, 197.0, 236.92 ]
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
db = LayerDataSetBuilder(clip, outputDir, {'type': 'spherical', 'phi': range(-10, 11, 10), 'theta': range(-10, 11, 10)}, [400,400])
# Setup view with camera position
view = db.getView()
simple.Show(wavelet, view)
simple.Render(view)
simple.ResetCamera(view)
simple.Hide(wavelet, view)
db.start()
layerIdx = 0
for layer in cores:
# Select only one layer
contour.Isosurfaces = isoValues[layerIdx]
# Capture each field of each layer
for field in fields:
db.setActiveLayer(layer, field, layerMesh[layer])
db.writeLayerData()
# Move to the next layer
layerIdx += 1
db.stop()
|
<commit_before><commit_msg>Add fake limited composite example<commit_after>
|
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
outputDir = '/Users/seb/Desktop/float-image/'
# -----------------------------------------------------------------------------
from paraview import simple
from tonic.paraview.dataset_builder import *
# -----------------------------------------------------------------------------
# VTK Pipeline creation
# -----------------------------------------------------------------------------
wavelet = simple.Wavelet()
calc = simple.Calculator()
calc.Function = 'coordsX'
calc.ResultArrayName = 'x'
contour = simple.Contour(
PointMergeMethod="Uniform Binning",
ComputeScalars = 1,
ComputeNormals = 1,
Isosurfaces = 157.09,
ContourBy = ['POINTS', 'RTData'])
clip = simple.Clip()
clip.ClipType.Normal = [0.0, 0.0, -1.0]
# -----------------------------------------------------------------------------
# Data To Export
# -----------------------------------------------------------------------------
layerMesh = {
'core 1': False,
'core 2': True,
'core 3': True,
'core 4': True,
'core 5': True
}
fields = ['RTData', 'x']
cores = ['core 1', 'core 2', 'core 3', 'core 4', 'core 5']
isoValues = [ 77.26, 117.18, 157.09, 197.0, 236.92 ]
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
db = LayerDataSetBuilder(clip, outputDir, {'type': 'spherical', 'phi': range(-10, 11, 10), 'theta': range(-10, 11, 10)}, [400,400])
# Setup view with camera position
view = db.getView()
simple.Show(wavelet, view)
simple.Render(view)
simple.ResetCamera(view)
simple.Hide(wavelet, view)
db.start()
layerIdx = 0
for layer in cores:
# Select only one layer
contour.Isosurfaces = isoValues[layerIdx]
# Capture each field of each layer
for field in fields:
db.setActiveLayer(layer, field, layerMesh[layer])
db.writeLayerData()
# Move to the next layer
layerIdx += 1
db.stop()
|
Add fake limited composite example# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
outputDir = '/Users/seb/Desktop/float-image/'
# -----------------------------------------------------------------------------
from paraview import simple
from tonic.paraview.dataset_builder import *
# -----------------------------------------------------------------------------
# VTK Pipeline creation
# -----------------------------------------------------------------------------
wavelet = simple.Wavelet()
calc = simple.Calculator()
calc.Function = 'coordsX'
calc.ResultArrayName = 'x'
contour = simple.Contour(
PointMergeMethod="Uniform Binning",
ComputeScalars = 1,
ComputeNormals = 1,
Isosurfaces = 157.09,
ContourBy = ['POINTS', 'RTData'])
clip = simple.Clip()
clip.ClipType.Normal = [0.0, 0.0, -1.0]
# -----------------------------------------------------------------------------
# Data To Export
# -----------------------------------------------------------------------------
layerMesh = {
'core 1': False,
'core 2': True,
'core 3': True,
'core 4': True,
'core 5': True
}
fields = ['RTData', 'x']
cores = ['core 1', 'core 2', 'core 3', 'core 4', 'core 5']
isoValues = [ 77.26, 117.18, 157.09, 197.0, 236.92 ]
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
db = LayerDataSetBuilder(clip, outputDir, {'type': 'spherical', 'phi': range(-10, 11, 10), 'theta': range(-10, 11, 10)}, [400,400])
# Setup view with camera position
view = db.getView()
simple.Show(wavelet, view)
simple.Render(view)
simple.ResetCamera(view)
simple.Hide(wavelet, view)
db.start()
layerIdx = 0
for layer in cores:
# Select only one layer
contour.Isosurfaces = isoValues[layerIdx]
# Capture each field of each layer
for field in fields:
db.setActiveLayer(layer, field, layerMesh[layer])
db.writeLayerData()
# Move to the next layer
layerIdx += 1
db.stop()
|
<commit_before><commit_msg>Add fake limited composite example<commit_after># -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
outputDir = '/Users/seb/Desktop/float-image/'
# -----------------------------------------------------------------------------
from paraview import simple
from tonic.paraview.dataset_builder import *
# -----------------------------------------------------------------------------
# VTK Pipeline creation
# -----------------------------------------------------------------------------
wavelet = simple.Wavelet()
calc = simple.Calculator()
calc.Function = 'coordsX'
calc.ResultArrayName = 'x'
contour = simple.Contour(
PointMergeMethod="Uniform Binning",
ComputeScalars = 1,
ComputeNormals = 1,
Isosurfaces = 157.09,
ContourBy = ['POINTS', 'RTData'])
clip = simple.Clip()
clip.ClipType.Normal = [0.0, 0.0, -1.0]
# -----------------------------------------------------------------------------
# Data To Export
# -----------------------------------------------------------------------------
layerMesh = {
'core 1': False,
'core 2': True,
'core 3': True,
'core 4': True,
'core 5': True
}
fields = ['RTData', 'x']
cores = ['core 1', 'core 2', 'core 3', 'core 4', 'core 5']
isoValues = [ 77.26, 117.18, 157.09, 197.0, 236.92 ]
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
db = LayerDataSetBuilder(clip, outputDir, {'type': 'spherical', 'phi': range(-10, 11, 10), 'theta': range(-10, 11, 10)}, [400,400])
# Setup view with camera position
view = db.getView()
simple.Show(wavelet, view)
simple.Render(view)
simple.ResetCamera(view)
simple.Hide(wavelet, view)
db.start()
layerIdx = 0
for layer in cores:
# Select only one layer
contour.Isosurfaces = isoValues[layerIdx]
# Capture each field of each layer
for field in fields:
db.setActiveLayer(layer, field, layerMesh[layer])
db.writeLayerData()
# Move to the next layer
layerIdx += 1
db.stop()
|
|
6a2bd578cc22231bce66a4d110b4ff1536743097
|
dataactcore/migrations/versions/7597deb348fb_fabs_created_at_and_fpds_updated_at_.py
|
dataactcore/migrations/versions/7597deb348fb_fabs_created_at_and_fpds_updated_at_.py
|
"""FABS created_at and FPDS updated_at indexes
Revision ID: 7597deb348fb
Revises: b168f0cdc5a8
Create Date: 2018-02-06 16:08:20.985202
"""
# revision identifiers, used by Alembic.
revision = '7597deb348fb'
down_revision = 'b168f0cdc5a8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_detached_award_procurement_updated_at'), 'detached_award_procurement', ['updated_at'], unique=False)
op.create_index(op.f('ix_published_award_financial_assistance_created_at'), 'published_award_financial_assistance', ['created_at'], unique=False)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_detached_award_procurement_updated_at'), table_name='detached_award_procurement')
op.drop_index(op.f('ix_published_award_financial_assistance_created_at'), table_name='published_award_financial_assistance')
### end Alembic commands ###
|
Add index to the `created_at` column in `published_award_financial_assistance` and the `updated_at` column in `detached_award_procurement`
|
Add index to the `created_at` column in `published_award_financial_assistance` and the `updated_at` column in `detached_award_procurement`
|
Python
|
cc0-1.0
|
fedspendingtransparency/data-act-broker-backend,fedspendingtransparency/data-act-broker-backend
|
Add index to the `created_at` column in `published_award_financial_assistance` and the `updated_at` column in `detached_award_procurement`
|
"""FABS created_at and FPDS updated_at indexes
Revision ID: 7597deb348fb
Revises: b168f0cdc5a8
Create Date: 2018-02-06 16:08:20.985202
"""
# revision identifiers, used by Alembic.
revision = '7597deb348fb'
down_revision = 'b168f0cdc5a8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_detached_award_procurement_updated_at'), 'detached_award_procurement', ['updated_at'], unique=False)
op.create_index(op.f('ix_published_award_financial_assistance_created_at'), 'published_award_financial_assistance', ['created_at'], unique=False)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_detached_award_procurement_updated_at'), table_name='detached_award_procurement')
op.drop_index(op.f('ix_published_award_financial_assistance_created_at'), table_name='published_award_financial_assistance')
### end Alembic commands ###
|
<commit_before><commit_msg>Add index to the `created_at` column in `published_award_financial_assistance` and the `updated_at` column in `detached_award_procurement`<commit_after>
|
"""FABS created_at and FPDS updated_at indexes
Revision ID: 7597deb348fb
Revises: b168f0cdc5a8
Create Date: 2018-02-06 16:08:20.985202
"""
# revision identifiers, used by Alembic.
revision = '7597deb348fb'
down_revision = 'b168f0cdc5a8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_detached_award_procurement_updated_at'), 'detached_award_procurement', ['updated_at'], unique=False)
op.create_index(op.f('ix_published_award_financial_assistance_created_at'), 'published_award_financial_assistance', ['created_at'], unique=False)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_detached_award_procurement_updated_at'), table_name='detached_award_procurement')
op.drop_index(op.f('ix_published_award_financial_assistance_created_at'), table_name='published_award_financial_assistance')
### end Alembic commands ###
|
Add index to the `created_at` column in `published_award_financial_assistance` and the `updated_at` column in `detached_award_procurement`
"""FABS created_at and FPDS updated_at indexes
Revision ID: 7597deb348fb
Revises: b168f0cdc5a8
Create Date: 2018-02-06 16:08:20.985202
"""
# revision identifiers, used by Alembic.
revision = '7597deb348fb'
down_revision = 'b168f0cdc5a8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_detached_award_procurement_updated_at'), 'detached_award_procurement', ['updated_at'], unique=False)
op.create_index(op.f('ix_published_award_financial_assistance_created_at'), 'published_award_financial_assistance', ['created_at'], unique=False)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_detached_award_procurement_updated_at'), table_name='detached_award_procurement')
op.drop_index(op.f('ix_published_award_financial_assistance_created_at'), table_name='published_award_financial_assistance')
### end Alembic commands ###
|
<commit_before><commit_msg>Add index to the `created_at` column in `published_award_financial_assistance` and the `updated_at` column in `detached_award_procurement`<commit_after>"""FABS created_at and FPDS updated_at indexes
Revision ID: 7597deb348fb
Revises: b168f0cdc5a8
Create Date: 2018-02-06 16:08:20.985202
"""
# revision identifiers, used by Alembic.
revision = '7597deb348fb'
down_revision = 'b168f0cdc5a8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_detached_award_procurement_updated_at'), 'detached_award_procurement', ['updated_at'], unique=False)
op.create_index(op.f('ix_published_award_financial_assistance_created_at'), 'published_award_financial_assistance', ['created_at'], unique=False)
### end Alembic commands ###
def downgrade_data_broker():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_detached_award_procurement_updated_at'), table_name='detached_award_procurement')
op.drop_index(op.f('ix_published_award_financial_assistance_created_at'), table_name='published_award_financial_assistance')
### end Alembic commands ###
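A hedged aside: applying this revision is ordinarily done with `alembic upgrade head`; the snippet below is a sketch of the programmatic equivalent and assumes the broker's alembic.ini is in the working directory and that its multi-engine env resolves engine_name as in the functions above.
from alembic.config import main as alembic_main
# Sketch only: same effect as the `alembic upgrade head` CLI invocation
alembic_main(argv=["upgrade", "head"])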
|
|
29d0797540461f8c021ecad6e5d1e724dcc3e378
|
tardis/tests/tests_slow/runner.py
|
tardis/tests/tests_slow/runner.py
|
import time
import subprocess
if __name__ == "__main__":
while True:
subprocess.call([
"python", "setup.py", "test", "--test-path=tardis/tests/test_util.py",
])
time.sleep(20)
|
Make a simple infinite while loop to run tests.
|
Make a simple infinite while loop to run tests.
|
Python
|
bsd-3-clause
|
orbitfold/tardis,kaushik94/tardis,kaushik94/tardis,orbitfold/tardis,orbitfold/tardis,orbitfold/tardis,kaushik94/tardis,kaushik94/tardis
|
Make a simple infinite while loop to run tests.
|
import time
import subprocess
if __name__ == "__main__":
while True:
subprocess.call([
"python", "setup.py", "test", "--test-path=tardis/tests/test_util.py",
])
time.sleep(20)
|
<commit_before><commit_msg>Make a simple infinite while loop to run tests.<commit_after>
|
import time
import subprocess
if __name__ == "__main__":
while True:
subprocess.call([
"python", "setup.py", "test", "--test-path=tardis/tests/test_util.py",
])
time.sleep(20)
|
Make a simple infinite while loop to run tests.
import time
import subprocess
if __name__ == "__main__":
while True:
subprocess.call([
"python", "setup.py", "test", "--test-path=tardis/tests/test_util.py",
])
time.sleep(20)
|
<commit_before><commit_msg>Make a simple infinite while loop to run tests.<commit_after>import time
import subprocess
if __name__ == "__main__":
while True:
subprocess.call([
"python", "setup.py", "test", "--test-path=tardis/tests/test_util.py",
])
time.sleep(20)
|
|
9013f072a8b82ab65ad2c599fe331f7835ebee47
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/tests/test_urls.py
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/users/tests/test_urls.py
|
from django.core.urlresolvers import reverse, resolve
from test_plus.test import TestCase
class TestUserURLs(TestCase):
"""Test URL patterns for users app."""
def setUp(self):
self.user = self.make_user()
def test_list_reverse(self):
"""users:list should reverse to /users/."""
self.assertEqual(reverse('users:list'), '/users/')
def test_list_resolve(self):
"""/users/ should resolve to users:list."""
self.assertEqual(resolve('/users/').view_name, 'users:list')
def test_redirect_reverse(self):
"""users:redirect should reverse to /users/~redirect/."""
self.assertEqual(reverse('users:redirect'), '/users/~redirect/')
def test_redirect_resolve(self):
"""/users/~redirect/ should resolve to users:redirect."""
self.assertEqual(
resolve('/users/~redirect/').view_name,
'users:redirect'
)
def test_detail_reverse(self):
"""users:detail should reverse to /users/testuser/."""
self.assertEqual(
reverse('users:detail', kwargs={'username': 'testuser'}),
'/users/testuser/'
)
def test_detail_resolve(self):
"""/users/testuser/ should resolve to users:detail."""
self.assertEqual(resolve('/users/testuser/').view_name, 'users:detail')
def test_update_reverse(self):
"""users:update should reverse to /users/~update/."""
self.assertEqual(reverse('users:update'), '/users/~update/')
def test_update_resolve(self):
"""/users/~update/ should resolve to users:update."""
self.assertEqual(
resolve('/users/~update/').view_name,
'users:update'
)
|
Test users app URL patterns
|
Test users app URL patterns
For the sake of completeness, and since regular expressions can be error-prone.
|
Python
|
bsd-3-clause
|
hackebrot/cookiecutter-django,topwebmaster/cookiecutter-django,trungdong/cookiecutter-django,thisjustin/cookiecutter-django,luzfcb/cookiecutter-django,webyneter/cookiecutter-django,aleprovencio/cookiecutter-django,hackebrot/cookiecutter-django,hairychris/cookiecutter-django,gappsexperts/cookiecutter-django,asyncee/cookiecutter-django,hairychris/cookiecutter-django,ad-m/cookiecutter-django,topwebmaster/cookiecutter-django,gappsexperts/cookiecutter-django,thisjustin/cookiecutter-django,gappsexperts/cookiecutter-django,asyncee/cookiecutter-django,hairychris/cookiecutter-django,webyneter/cookiecutter-django,ryankanno/cookiecutter-django,ddiazpinto/cookiecutter-django,schacki/cookiecutter-django,pydanny/cookiecutter-django,schacki/cookiecutter-django,asyncee/cookiecutter-django,thisjustin/cookiecutter-django,webspired/cookiecutter-django,kappataumu/cookiecutter-django,Parbhat/cookiecutter-django-foundation,mistalaba/cookiecutter-django,Parbhat/cookiecutter-django-foundation,trungdong/cookiecutter-django,webyneter/cookiecutter-django,bopo/cookiecutter-django,schacki/cookiecutter-django,webspired/cookiecutter-django,Parbhat/cookiecutter-django-foundation,schacki/cookiecutter-django,ad-m/cookiecutter-django,ddiazpinto/cookiecutter-django,gappsexperts/cookiecutter-django,kappataumu/cookiecutter-django,topwebmaster/cookiecutter-django,luzfcb/cookiecutter-django,ddiazpinto/cookiecutter-django,bopo/cookiecutter-django,asyncee/cookiecutter-django,ad-m/cookiecutter-django,luzfcb/cookiecutter-django,trungdong/cookiecutter-django,aleprovencio/cookiecutter-django,pydanny/cookiecutter-django,ryankanno/cookiecutter-django,hackebrot/cookiecutter-django,mistalaba/cookiecutter-django,pydanny/cookiecutter-django,ryankanno/cookiecutter-django,trungdong/cookiecutter-django,mistalaba/cookiecutter-django,hackebrot/cookiecutter-django,Parbhat/cookiecutter-django-foundation,mistalaba/cookiecutter-django,pydanny/cookiecutter-django,bopo/cookiecutter-django,webyneter/cookiecutter-django,topwebmaster/cookiecutter-django,webspired/cookiecutter-django,luzfcb/cookiecutter-django,aleprovencio/cookiecutter-django,hairychris/cookiecutter-django,ad-m/cookiecutter-django,ryankanno/cookiecutter-django,kappataumu/cookiecutter-django,aleprovencio/cookiecutter-django,bopo/cookiecutter-django,ddiazpinto/cookiecutter-django,kappataumu/cookiecutter-django,thisjustin/cookiecutter-django,webspired/cookiecutter-django
|
Test users app URL patterns
For the sake of completeness, and since regular expressions can be error-prone.
|
from django.core.urlresolvers import reverse, resolve
from test_plus.test import TestCase
class TestUserURLs(TestCase):
"""Test URL patterns for users app."""
def setUp(self):
self.user = self.make_user()
def test_list_reverse(self):
"""users:list should reverse to /users/."""
self.assertEqual(reverse('users:list'), '/users/')
def test_list_resolve(self):
"""/users/ should resolve to users:list."""
self.assertEqual(resolve('/users/').view_name, 'users:list')
def test_redirect_reverse(self):
"""users:redirect should reverse to /users/~redirect/."""
self.assertEqual(reverse('users:redirect'), '/users/~redirect/')
def test_redirect_resolve(self):
"""/users/~redirect/ should resolve to users:redirect."""
self.assertEqual(
resolve('/users/~redirect/').view_name,
'users:redirect'
)
def test_detail_reverse(self):
"""users:detail should reverse to /users/testuser/."""
self.assertEqual(
reverse('users:detail', kwargs={'username': 'testuser'}),
'/users/testuser/'
)
def test_detail_resolve(self):
"""/users/testuser/ should resolve to users:detail."""
self.assertEqual(resolve('/users/testuser/').view_name, 'users:detail')
def test_update_reverse(self):
"""users:update should reverse to /users/~update/."""
self.assertEqual(reverse('users:update'), '/users/~update/')
def test_update_resolve(self):
"""/users/~update/ should resolve to users:update."""
self.assertEqual(
resolve('/users/~update/').view_name,
'users:update'
)
|
<commit_before><commit_msg>Test users app URL patterns
For the sake of completeness, and since regular expressions can be error-prone.<commit_after>
|
from django.core.urlresolvers import reverse, resolve
from test_plus.test import TestCase
class TestUserURLs(TestCase):
"""Test URL patterns for users app."""
def setUp(self):
self.user = self.make_user()
def test_list_reverse(self):
"""users:list should reverse to /users/."""
self.assertEqual(reverse('users:list'), '/users/')
def test_list_resolve(self):
"""/users/ should resolve to users:list."""
self.assertEqual(resolve('/users/').view_name, 'users:list')
def test_redirect_reverse(self):
"""users:redirect should reverse to /users/~redirect/."""
self.assertEqual(reverse('users:redirect'), '/users/~redirect/')
def test_redirect_resolve(self):
"""/users/~redirect/ should resolve to users:redirect."""
self.assertEqual(
resolve('/users/~redirect/').view_name,
'users:redirect'
)
def test_detail_reverse(self):
"""users:detail should reverse to /users/testuser/."""
self.assertEqual(
reverse('users:detail', kwargs={'username': 'testuser'}),
'/users/testuser/'
)
def test_detail_resolve(self):
"""/users/testuser/ should resolve to users:detail."""
self.assertEqual(resolve('/users/testuser/').view_name, 'users:detail')
def test_update_reverse(self):
"""users:update should reverse to /users/~update/."""
self.assertEqual(reverse('users:update'), '/users/~update/')
def test_update_resolve(self):
"""/users/~update/ should resolve to users:update."""
self.assertEqual(
resolve('/users/~update/').view_name,
'users:update'
)
|
Test users app URL patterns
For the sake of completeness, and since regular expressions can be error-prone.
from django.core.urlresolvers import reverse, resolve
from test_plus.test import TestCase
class TestUserURLs(TestCase):
"""Test URL patterns for users app."""
def setUp(self):
self.user = self.make_user()
def test_list_reverse(self):
"""users:list should reverse to /users/."""
self.assertEqual(reverse('users:list'), '/users/')
def test_list_resolve(self):
"""/users/ should resolve to users:list."""
self.assertEqual(resolve('/users/').view_name, 'users:list')
def test_redirect_reverse(self):
"""users:redirect should reverse to /users/~redirect/."""
self.assertEqual(reverse('users:redirect'), '/users/~redirect/')
def test_redirect_resolve(self):
"""/users/~redirect/ should resolve to users:redirect."""
self.assertEqual(
resolve('/users/~redirect/').view_name,
'users:redirect'
)
def test_detail_reverse(self):
"""users:detail should reverse to /users/testuser/."""
self.assertEqual(
reverse('users:detail', kwargs={'username': 'testuser'}),
'/users/testuser/'
)
def test_detail_resolve(self):
"""/users/testuser/ should resolve to users:detail."""
self.assertEqual(resolve('/users/testuser/').view_name, 'users:detail')
def test_update_reverse(self):
"""users:update should reverse to /users/~update/."""
self.assertEqual(reverse('users:update'), '/users/~update/')
def test_update_resolve(self):
"""/users/~update/ should resolve to users:update."""
self.assertEqual(
resolve('/users/~update/').view_name,
'users:update'
)
|
<commit_before><commit_msg>Test users app URL patterns
For the sake of completeness, and since regular expressions can be error-prone.<commit_after>from django.core.urlresolvers import reverse, resolve
from test_plus.test import TestCase
class TestUserURLs(TestCase):
"""Test URL patterns for users app."""
def setUp(self):
self.user = self.make_user()
def test_list_reverse(self):
"""users:list should reverse to /users/."""
self.assertEqual(reverse('users:list'), '/users/')
def test_list_resolve(self):
"""/users/ should resolve to users:list."""
self.assertEqual(resolve('/users/').view_name, 'users:list')
def test_redirect_reverse(self):
"""users:redirect should reverse to /users/~redirect/."""
self.assertEqual(reverse('users:redirect'), '/users/~redirect/')
def test_redirect_resolve(self):
"""/users/~redirect/ should resolve to users:redirect."""
self.assertEqual(
resolve('/users/~redirect/').view_name,
'users:redirect'
)
def test_detail_reverse(self):
"""users:detail should reverse to /users/testuser/."""
self.assertEqual(
reverse('users:detail', kwargs={'username': 'testuser'}),
'/users/testuser/'
)
def test_detail_resolve(self):
"""/users/testuser/ should resolve to users:detail."""
self.assertEqual(resolve('/users/testuser/').view_name, 'users:detail')
def test_update_reverse(self):
"""users:update should reverse to /users/~update/."""
self.assertEqual(reverse('users:update'), '/users/~update/')
def test_update_resolve(self):
"""/users/~update/ should resolve to users:update."""
self.assertEqual(
resolve('/users/~update/').view_name,
'users:update'
)
|
|
e7685951e1d271b07df0e4a0681a2404806f4028
|
InvenTree/stock/test_api.py
|
InvenTree/stock/test_api.py
|
from rest_framework.test import APITestCase
from rest_framework import status
from django.urls import reverse
from django.contrib.auth import get_user_model
from .models import StockLocation, StockItem
class StockLocationTest(APITestCase):
"""
Series of API tests for the StockLocation API
"""
list_url = reverse('api-location-list')
def setUp(self):
# Create a user for auth
User = get_user_model()
User.objects.create_user('testuser', 'test@testing.com', 'password')
self.client.login(username='testuser', password='password')
# Add some stock locations
StockLocation.objects.create(name='top', description='top category')
def test_list(self):
# Check that we can request the StockLocation list
response = self.client.get(self.list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertGreaterEqual(len(response.data), 1)
def test_add(self):
# Check that we can add a new StockLocation
data = {
'parent': 1,
'name': 'Location',
'description': 'Another location for stock'
}
response = self.client.post(self.list_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class StockItemTest(APITestCase):
"""
Series of API tests for the StockItem API
"""
list_url = reverse('api-stock-list')
def detail_url(self, pk):
        return reverse('api-stock-detail', kwargs={'pk': pk})
def setUp(self):
# Create a user for auth
User = get_user_model()
User.objects.create_user('testuser', 'test@testing.com', 'password')
self.client.login(username='testuser', password='password')
# Create some stock locations
top = StockLocation.objects.create(name='A', description='top')
StockLocation.objects.create(name='B', description='location b', parent=top)
StockLocation.objects.create(name='C', description='location c', parent=top)
def test_get_stock_list(self):
response = self.client.get(self.list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
Add (simple) test cases for Stock API
|
Add (simple) test cases for Stock API
- Still a lot of work to do here
|
Python
|
mit
|
SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree,inventree/InvenTree,inventree/InvenTree
|
Add (simple) test cases for Stock API
- Still a lot of work to do here
|
from rest_framework.test import APITestCase
from rest_framework import status
from django.urls import reverse
from django.contrib.auth import get_user_model
from .models import StockLocation, StockItem
class StockLocationTest(APITestCase):
"""
Series of API tests for the StockLocation API
"""
list_url = reverse('api-location-list')
def setUp(self):
# Create a user for auth
User = get_user_model()
User.objects.create_user('testuser', 'test@testing.com', 'password')
self.client.login(username='testuser', password='password')
# Add some stock locations
StockLocation.objects.create(name='top', description='top category')
def test_list(self):
# Check that we can request the StockLocation list
response = self.client.get(self.list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertGreaterEqual(len(response.data), 1)
def test_add(self):
# Check that we can add a new StockLocation
data = {
'parent': 1,
'name': 'Location',
'description': 'Another location for stock'
}
response = self.client.post(self.list_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class StockItemTest(APITestCase):
"""
Series of API tests for the StockItem API
"""
list_url = reverse('api-stock-list')
def detail_url(self, pk):
        return reverse('api-stock-detail', kwargs={'pk': pk})
def setUp(self):
# Create a user for auth
User = get_user_model()
User.objects.create_user('testuser', 'test@testing.com', 'password')
self.client.login(username='testuser', password='password')
# Create some stock locations
top = StockLocation.objects.create(name='A', description='top')
StockLocation.objects.create(name='B', description='location b', parent=top)
StockLocation.objects.create(name='C', description='location c', parent=top)
def test_get_stock_list(self):
response = self.client.get(self.list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
<commit_before><commit_msg>Add (simple) test cases for Stock API
- Still a lot of work to do here<commit_after>
|
from rest_framework.test import APITestCase
from rest_framework import status
from django.urls import reverse
from django.contrib.auth import get_user_model
from .models import StockLocation, StockItem
class StockLocationTest(APITestCase):
"""
Series of API tests for the StockLocation API
"""
list_url = reverse('api-location-list')
def setUp(self):
# Create a user for auth
User = get_user_model()
User.objects.create_user('testuser', 'test@testing.com', 'password')
self.client.login(username='testuser', password='password')
# Add some stock locations
StockLocation.objects.create(name='top', description='top category')
def test_list(self):
# Check that we can request the StockLocation list
response = self.client.get(self.list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertGreaterEqual(len(response.data), 1)
def test_add(self):
# Check that we can add a new StockLocation
data = {
'parent': 1,
'name': 'Location',
'description': 'Another location for stock'
}
response = self.client.post(self.list_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class StockItemTest(APITestCase):
"""
Series of API tests for the StockItem API
"""
list_url = reverse('api-stock-list')
def detail_url(self, pk):
        return reverse('api-stock-detail', kwargs={'pk': pk})
def setUp(self):
# Create a user for auth
User = get_user_model()
User.objects.create_user('testuser', 'test@testing.com', 'password')
self.client.login(username='testuser', password='password')
# Create some stock locations
top = StockLocation.objects.create(name='A', description='top')
StockLocation.objects.create(name='B', description='location b', parent=top)
StockLocation.objects.create(name='C', description='location c', parent=top)
def test_get_stock_list(self):
response = self.client.get(self.list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
Add (simple) test cases for Stock API
- Still a lot of work to do here
from rest_framework.test import APITestCase
from rest_framework import status
from django.urls import reverse
from django.contrib.auth import get_user_model
from .models import StockLocation, StockItem
class StockLocationTest(APITestCase):
"""
Series of API tests for the StockLocation API
"""
list_url = reverse('api-location-list')
def setUp(self):
# Create a user for auth
User = get_user_model()
User.objects.create_user('testuser', 'test@testing.com', 'password')
self.client.login(username='testuser', password='password')
# Add some stock locations
StockLocation.objects.create(name='top', description='top category')
def test_list(self):
# Check that we can request the StockLocation list
response = self.client.get(self.list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertGreaterEqual(len(response.data), 1)
def test_add(self):
# Check that we can add a new StockLocation
data = {
'parent': 1,
'name': 'Location',
'description': 'Another location for stock'
}
response = self.client.post(self.list_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class StockItemTest(APITestCase):
"""
Series of API tests for the StockItem API
"""
list_url = reverse('api-stock-list')
def detail_url(self, pk):
        return reverse('api-stock-detail', kwargs={'pk': pk})
def setUp(self):
# Create a user for auth
User = get_user_model()
User.objects.create_user('testuser', 'test@testing.com', 'password')
self.client.login(username='testuser', password='password')
# Create some stock locations
top = StockLocation.objects.create(name='A', description='top')
StockLocation.objects.create(name='B', description='location b', parent=top)
StockLocation.objects.create(name='C', description='location c', parent=top)
def test_get_stock_list(self):
response = self.client.get(self.list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
<commit_before><commit_msg>Add (simple) test cases for Stock API
- Still a lot of work to do here<commit_after>from rest_framework.test import APITestCase
from rest_framework import status
from django.urls import reverse
from django.contrib.auth import get_user_model
from .models import StockLocation, StockItem
class StockLocationTest(APITestCase):
"""
Series of API tests for the StockLocation API
"""
list_url = reverse('api-location-list')
def setUp(self):
# Create a user for auth
User = get_user_model()
User.objects.create_user('testuser', 'test@testing.com', 'password')
self.client.login(username='testuser', password='password')
# Add some stock locations
StockLocation.objects.create(name='top', description='top category')
def test_list(self):
# Check that we can request the StockLocation list
response = self.client.get(self.list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertGreaterEqual(len(response.data), 1)
def test_add(self):
# Check that we can add a new StockLocation
data = {
'parent': 1,
'name': 'Location',
'description': 'Another location for stock'
}
response = self.client.post(self.list_url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class StockItemTest(APITestCase):
"""
Series of API tests for the StockItem API
"""
list_url = reverse('api-stock-list')
def detail_url(self, pk):
        return reverse('api-stock-detail', kwargs={'pk': pk})
def setUp(self):
# Create a user for auth
User = get_user_model()
User.objects.create_user('testuser', 'test@testing.com', 'password')
self.client.login(username='testuser', password='password')
# Create some stock locations
top = StockLocation.objects.create(name='A', description='top')
StockLocation.objects.create(name='B', description='location b', parent=top)
StockLocation.objects.create(name='C', description='location c', parent=top)
def test_get_stock_list(self):
response = self.client.get(self.list_url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
|
|
5c142d7e7a311013dd940a6d6900b5d9984dc0fe
|
python/dynamic_image_meme.py
|
python/dynamic_image_meme.py
|
import requests
import json
# Dynamically create a 300x300 PNG image with a yellow background and draw some text on the center of it later.
# Refer to https://pixlab.io/#/cmd?id=newimage && https://pixlab.io/#/cmd?id=drawtext for additional information.
req = requests.get('https://api.pixlab.io/newimage',params={
'key':'My_Pix_Key',
"width":300,
"height":300,
"color":"yellow"
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
exit();
# Link to the new image
img = reply['link'];
# Draw some text now on the new image
req = requests.get('https://api.pixlab.io/drawtext',params={
'img':img, #The newly created image
'key':'My_Pix_Key',
"cap":True, #Uppercase
"color":"black", #Text color
"font":"wolf",
"center":"bonjour"
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Pic location: "+ reply['link'])
|
Create dynamic image & draw some text on it
|
Create dynamic image & draw some text on it
|
Python
|
bsd-2-clause
|
symisc/pixlab,symisc/pixlab,symisc/pixlab
|
Create dynamic image & draw some text on it
|
import requests
import json
# Dynamically create a 300x300 PNG image with a yellow background and draw some text on the center of it later.
# Refer to https://pixlab.io/#/cmd?id=newimage && https://pixlab.io/#/cmd?id=drawtext for additional information.
req = requests.get('https://api.pixlab.io/newimage',params={
'key':'My_Pix_Key',
"width":300,
"height":300,
"color":"yellow"
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
exit();
# Link to the new image
img = reply['link'];
# Draw some text now on the new image
req = requests.get('https://api.pixlab.io/drawtext',params={
'img':img, #The newly created image
'key':'My_Pix_Key',
"cap":True, #Uppercase
"color":"black", #Text color
"font":"wolf",
"center":"bonjour"
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Pic location: "+ reply['link'])
|
<commit_before><commit_msg>Create dynamic image & draw some text on it<commit_after>
|
import requests
import json
# Dynamically create a 300x300 PNG image with a yellow background and draw some text on the center of it later.
# Refer to https://pixlab.io/#/cmd?id=newimage && https://pixlab.io/#/cmd?id=drawtext for additional information.
req = requests.get('https://api.pixlab.io/newimage',params={
'key':'My_Pix_Key',
"width":300,
"height":300,
"color":"yellow"
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
exit();
# Link to the new image
img = reply['link'];
# Draw some text now on the new image
req = requests.get('https://api.pixlab.io/drawtext',params={
'img':img, #The newly created image
'key':'My_Pix_Key',
"cap":True, #Uppercase
"color":"black", #Text color
"font":"wolf",
"center":"bonjour"
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Pic location: "+ reply['link'])
|
Create dynamic image & draw some text on it
import requests
import json
# Dynamically create a 300x300 PNG image with a yellow background and draw some text on the center of it later.
# Refer to https://pixlab.io/#/cmd?id=newimage && https://pixlab.io/#/cmd?id=drawtext for additional information.
req = requests.get('https://api.pixlab.io/newimage',params={
'key':'My_Pix_Key',
"width":300,
"height":300,
"color":"yellow"
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
exit();
# Link to the new image
img = reply['link'];
# Draw some text now on the new image
req = requests.get('https://api.pixlab.io/drawtext',params={
'img':img, #The newly created image
'key':'My_Pix_Key',
"cap":True, #Uppercase
"color":"black", #Text color
"font":"wolf",
"center":"bonjour"
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Pic location: "+ reply['link'])
|
<commit_before><commit_msg>Create dynamic image & draw some text on it<commit_after>import requests
import json
# Dynamically create a 300x300 PNG image with a yellow background and draw some text on the center of it later.
# Refer to https://pixlab.io/#/cmd?id=newimage && https://pixlab.io/#/cmd?id=drawtext for additional information.
req = requests.get('https://api.pixlab.io/newimage',params={
'key':'My_Pix_Key',
"width":300,
"height":300,
"color":"yellow"
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
exit();
# Link to the new image
img = reply['link'];
# Draw some text now on the new image
req = requests.get('https://api.pixlab.io/drawtext',params={
'img':img, #The newly created image
'key':'My_Pix_Key',
"cap":True, #Uppercase
"color":"black", #Text color
"font":"wolf",
"center":"bonjour"
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Pic location: "+ reply['link'])
|
|
88877163201ce32d28633b833e1ec17cd3429650
|
python/misc/clean-sms-mms.py
|
python/misc/clean-sms-mms.py
|
#!/usr/bin/env python3
''' Deletes old messages from a backup file created by Titanium Backup Pro
'''
import datetime
import lxml.etree
import shutil
import sys
MAXIMUM_MESSAGE_AGE_IN_DAYS = 365
if len(sys.argv) < 2:
sys.exit('USAGE: %s /path/to/com.keramidas.virtual.XML_MESSAGES-XXXXXXXX-XXXXXX.xml' % (sys.argv[0]))
infile_name = sys.argv[1]
# Create a backup copy since we'll modify the original
outfile_name = infile_name + '.bak'
shutil.copy2(infile_name, outfile_name)
# Remove any SMS/MMS messages older than MAXIMUM_MESSAGE_AGE_IN_DAYS
root = lxml.etree.parse(infile_name)
for element in root.iter():
if element.tag == '{http://www.titaniumtrack.com/ns/titanium-backup/messages}sms' \
or element.tag == '{http://www.titaniumtrack.com/ns/titanium-backup/messages}mms':
message_date = datetime.datetime.strptime(element.get('date'), '%Y-%m-%dT%H:%M:%S.%fZ')
if datetime.datetime.now() - message_date > datetime.timedelta(MAXIMUM_MESSAGE_AGE_IN_DAYS):
element.getparent().remove(element)
with open(infile_name, 'wb') as infile:
infile.write(lxml.etree.tostring(root, pretty_print=True, xml_declaration=True))
|
Add script for cleaning up old SMS/MMS text messages
|
Add script for cleaning up old SMS/MMS text messages
|
Python
|
mit
|
bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile
|
Add script for cleaning up old SMS/MMS text messages
|
#!/usr/bin/env python3
''' Deletes old messages from a backup file created by Titanium Backup Pro
'''
import datetime
import lxml.etree
import shutil
import sys
MAXIMUM_MESSAGE_AGE_IN_DAYS = 365
if len(sys.argv) < 2:
sys.exit('USAGE: %s /path/to/com.keramidas.virtual.XML_MESSAGES-XXXXXXXX-XXXXXX.xml' % (sys.argv[0]))
infile_name = sys.argv[1]
# Create a backup copy since we'll modify the original
outfile_name = infile_name + '.bak'
shutil.copy2(infile_name, outfile_name)
# Remove any SMS/MMS messages older than MAXIMUM_MESSAGE_AGE_IN_DAYS
root = lxml.etree.parse(infile_name)
for element in root.iter():
if element.tag == '{http://www.titaniumtrack.com/ns/titanium-backup/messages}sms' \
or element.tag == '{http://www.titaniumtrack.com/ns/titanium-backup/messages}mms':
message_date = datetime.datetime.strptime(element.get('date'), '%Y-%m-%dT%H:%M:%S.%fZ')
if datetime.datetime.now() - message_date > datetime.timedelta(MAXIMUM_MESSAGE_AGE_IN_DAYS):
element.getparent().remove(element)
with open(infile_name, 'wb') as infile:
infile.write(lxml.etree.tostring(root, pretty_print=True, xml_declaration=True))
|
<commit_before><commit_msg>Add script for cleaning up old SMS/MMS text messages<commit_after>
|
#!/usr/bin/env python3
''' Deletes old messages from a backup file created by Titanium Backup Pro
'''
import datetime
import lxml.etree
import shutil
import sys
MAXIMUM_MESSAGE_AGE_IN_DAYS = 365
if len(sys.argv) < 2:
sys.exit('USAGE: %s /path/to/com.keramidas.virtual.XML_MESSAGES-XXXXXXXX-XXXXXX.xml' % (sys.argv[0]))
infile_name = sys.argv[1]
# Create a backup copy since we'll modify the original
outfile_name = infile_name + '.bak'
shutil.copy2(infile_name, outfile_name)
# Remove any SMS/MMS messages older than MAXIMUM_MESSAGE_AGE_IN_DAYS
root = lxml.etree.parse(infile_name)
for element in root.iter():
if element.tag == '{http://www.titaniumtrack.com/ns/titanium-backup/messages}sms' \
or element.tag == '{http://www.titaniumtrack.com/ns/titanium-backup/messages}mms':
message_date = datetime.datetime.strptime(element.get('date'), '%Y-%m-%dT%H:%M:%S.%fZ')
if datetime.datetime.now() - message_date > datetime.timedelta(MAXIMUM_MESSAGE_AGE_IN_DAYS):
element.getparent().remove(element)
with open(infile_name, 'wb') as infile:
infile.write(lxml.etree.tostring(root, pretty_print=True, xml_declaration=True))
|
Add script for cleaning up old SMS/MMS text messages
#!/usr/bin/env python3
''' Deletes old messages from a backup file created by Titanium Backup Pro
'''
import datetime
import lxml.etree
import shutil
import sys
MAXIMUM_MESSAGE_AGE_IN_DAYS = 365
if len(sys.argv) < 2:
sys.exit('USAGE: %s /path/to/com.keramidas.virtual.XML_MESSAGES-XXXXXXXX-XXXXXX.xml' % (sys.argv[0]))
infile_name = sys.argv[1]
# Create a backup copy since we'll modify the original
outfile_name = infile_name + '.bak'
shutil.copy2(infile_name, outfile_name)
# Remove any SMS/MMS messages older than MAXIMUM_MESSAGE_AGE_IN_DAYS
root = lxml.etree.parse(infile_name)
for element in root.iter():
if element.tag == '{http://www.titaniumtrack.com/ns/titanium-backup/messages}sms' \
or element.tag == '{http://www.titaniumtrack.com/ns/titanium-backup/messages}mms':
message_date = datetime.datetime.strptime(element.get('date'), '%Y-%m-%dT%H:%M:%S.%fZ')
if datetime.datetime.now() - message_date > datetime.timedelta(MAXIMUM_MESSAGE_AGE_IN_DAYS):
element.getparent().remove(element)
with open(infile_name, 'wb') as infile:
infile.write(lxml.etree.tostring(root, pretty_print=True, xml_declaration=True))
|
<commit_before><commit_msg>Add script for cleaning up old SMS/MMS text messages<commit_after>#!/usr/bin/env python3
''' Deletes old messages from a backup file created by Titanium Backup Pro
'''
import datetime
import lxml.etree
import shutil
import sys
MAXIMUM_MESSAGE_AGE_IN_DAYS = 365
if len(sys.argv) < 2:
sys.exit('USAGE: %s /path/to/com.keramidas.virtual.XML_MESSAGES-XXXXXXXX-XXXXXX.xml' % (sys.argv[0]))
infile_name = sys.argv[1]
# Create a backup copy since we'll modify the original
outfile_name = infile_name + '.bak'
shutil.copy2(infile_name, outfile_name)
# Remove any SMS/MMS messages older than MAXIMUM_MESSAGE_AGE_IN_DAYS
root = lxml.etree.parse(infile_name)
for element in root.iter():
if element.tag == '{http://www.titaniumtrack.com/ns/titanium-backup/messages}sms' \
or element.tag == '{http://www.titaniumtrack.com/ns/titanium-backup/messages}mms':
message_date = datetime.datetime.strptime(element.get('date'), '%Y-%m-%dT%H:%M:%S.%fZ')
if datetime.datetime.now() - message_date > datetime.timedelta(MAXIMUM_MESSAGE_AGE_IN_DAYS):
element.getparent().remove(element)
with open(infile_name, 'wb') as infile:
infile.write(lxml.etree.tostring(root, pretty_print=True, xml_declaration=True))
|
|
ba907a0c12c5bf90fa796d36fe18218df12281ae
|
printers.py
|
printers.py
|
import gdb
import re
class ShortVectorPrinter:
def __init__(self, val):
self.val = val
def to_string(self):
size = int(self.val['_size'])
N = int(self.val.type.template_argument(1))
cap = N if size <= N else int(self.val['_capacity'])
return 'MArray::short_vector<%d> of length %d, capacity %d' % (N, size, cap)
def children(self):
size = int(self.val['_size'])
data = self.val['_alloc']['_data']
for i in range(size):
yield ('[%d]' % i, data.dereference())
data = data + 1
def display_hint(self):
return 'array'
def str_lookup_function(val):
lookup_tag = val.type.strip_typedefs().tag
if lookup_tag == None:
return None
regex = re.compile("^MArray::short_vector<.*>$")
if regex.match(lookup_tag):
return ShortVectorPrinter(val)
return None
gdb.pretty_printers.append(str_lookup_function)
|
Add GDB pretty-printer for short_vector<T,N>
|
Add GDB pretty-printer for short_vector<T,N>
|
Python
|
bsd-3-clause
|
devinamatthews/marray,devinamatthews/marray,devinamatthews/marray
|
Add GDB pretty-printer for short_vector<T,N>
|
import gdb
import re
class ShortVectorPrinter:
def __init__(self, val):
self.val = val
def to_string(self):
size = int(self.val['_size'])
N = int(self.val.type.template_argument(1))
cap = N if size <= N else int(self.val['_capacity'])
return 'MArray::short_vector<%d> of length %d, capacity %d' % (N, size, cap)
def children(self):
size = int(self.val['_size'])
data = self.val['_alloc']['_data']
for i in range(size):
yield ('[%d]' % i, data.dereference())
data = data + 1
def display_hint(self):
return 'array'
def str_lookup_function(val):
lookup_tag = val.type.strip_typedefs().tag
if lookup_tag == None:
return None
regex = re.compile("^MArray::short_vector<.*>$")
if regex.match(lookup_tag):
return ShortVectorPrinter(val)
return None
gdb.pretty_printers.append(str_lookup_function)
|
<commit_before><commit_msg>Add GDB pretty-printer for short_vector<T,N><commit_after>
|
import gdb
import re
class ShortVectorPrinter:
def __init__(self, val):
self.val = val
def to_string(self):
size = int(self.val['_size'])
N = int(self.val.type.template_argument(1))
cap = N if size <= N else int(self.val['_capacity'])
return 'MArray::short_vector<%d> of length %d, capacity %d' % (N, size, cap)
def children(self):
size = int(self.val['_size'])
data = self.val['_alloc']['_data']
for i in range(size):
yield ('[%d]' % i, data.dereference())
data = data + 1
def display_hint(self):
return 'array'
def str_lookup_function(val):
lookup_tag = val.type.strip_typedefs().tag
if lookup_tag == None:
return None
regex = re.compile("^MArray::short_vector<.*>$")
if regex.match(lookup_tag):
return ShortVectorPrinter(val)
return None
gdb.pretty_printers.append(str_lookup_function)
|
Add GDB pretty-printer for short_vector<T,N>
import gdb
import re
class ShortVectorPrinter:
def __init__(self, val):
self.val = val
def to_string(self):
size = int(self.val['_size'])
N = int(self.val.type.template_argument(1))
cap = N if size <= N else int(self.val['_capacity'])
return 'MArray::short_vector<%d> of length %d, capacity %d' % (N, size, cap)
def children(self):
size = int(self.val['_size'])
data = self.val['_alloc']['_data']
for i in range(size):
yield ('[%d]' % i, data.dereference())
data = data + 1
def display_hint(self):
return 'array'
def str_lookup_function(val):
lookup_tag = val.type.strip_typedefs().tag
if lookup_tag == None:
return None
regex = re.compile("^MArray::short_vector<.*>$")
if regex.match(lookup_tag):
return ShortVectorPrinter(val)
return None
gdb.pretty_printers.append(str_lookup_function)
|
<commit_before><commit_msg>Add GDB pretty-printer for short_vector<T,N><commit_after>import gdb
import re
class ShortVectorPrinter:
def __init__(self, val):
self.val = val
def to_string(self):
size = int(self.val['_size'])
N = int(self.val.type.template_argument(1))
cap = N if size <= N else int(self.val['_capacity'])
return 'MArray::short_vector<%d> of length %d, capacity %d' % (N, size, cap)
def children(self):
size = int(self.val['_size'])
data = self.val['_alloc']['_data']
for i in range(size):
yield ('[%d]' % i, data.dereference())
data = data + 1
def display_hint(self):
return 'array'
def str_lookup_function(val):
lookup_tag = val.type.strip_typedefs().tag
if lookup_tag == None:
return None
regex = re.compile("^MArray::short_vector<.*>$")
if regex.match(lookup_tag):
return ShortVectorPrinter(val)
return None
gdb.pretty_printers.append(str_lookup_function)
|
|
ca30516df8037cfb0745f84481f7ada936447a8a
|
vkfeed/pages/main.py
|
vkfeed/pages/main.py
|
# -*- coding: utf-8 -*-
'''Generates the main page.'''
import re
from google.appengine.ext import webapp
import vkfeed.util
class MainPage(webapp.RequestHandler):
'''Generates the main page.'''
def get(self):
'''Processes a GET request.'''
self.response.out.write(vkfeed.util.render_template('main.html'))
def post(self):
'''Processes a POST request.'''
profile_url = self.request.get('profile_url', '')
match = re.match(r'''^
\s*
(?:https?://(?:www\.)?(?:vk\.com|vkontakte\.ru)/)?
(?P<profile_id>[a-zA-Z0-9_-]{5,})/?
\s*
$''', profile_url, re.IGNORECASE | re.VERBOSE)
if match:
self.redirect('/feed/' + match.group('profile_id') + '/wall')
else:
self.response.out.write(vkfeed.util.render_template('main.html', {
'post_error': u'''
Неверно указан URL профиля.
Адрес должен быть вида http://vkontakte.ru/имя_профиля.
Имя профиля должно удовлетворять требованиям, предъявляемым администрацией ВКонтакте.
'''
}))
|
# -*- coding: utf-8 -*-
'''Generates the main page.'''
import re
from google.appengine.ext import webapp
import vkfeed.util
class MainPage(webapp.RequestHandler):
'''Generates the main page.'''
def get(self):
'''Processes a GET request.'''
self.response.out.write(vkfeed.util.render_template('main.html'))
def post(self):
'''Processes a POST request.'''
profile_url = self.request.get('profile_url', '')
match = re.match(r'''^
\s*
(?:https?://(?:www\.)?(?:vk\.com|vkontakte\.ru)/)?
(?P<profile_id>[a-zA-Z0-9._-]{5,})/?
\s*
$''', profile_url, re.IGNORECASE | re.VERBOSE)
if match:
self.redirect('/feed/' + match.group('profile_id') + '/wall')
else:
self.response.out.write(vkfeed.util.render_template('main.html', {
'post_error': u'''
Неверно указан URL профиля.
Адрес должен быть вида http://vkontakte.ru/имя_профиля.
Имя профиля должно удовлетворять требованиям, предъявляемым администрацией ВКонтакте.
'''
}))
|
Support of old profile names
|
Support of old profile names
|
Python
|
bsd-2-clause
|
zhenkyn/vkfeedd,zhenkyn/vkfeedd,tol1k/vkfeed,kostkost/vkfeed,ALERTua/vkfeed,flyer2001/vkrss,KonishchevDmitry/vkfeed,antonsotin/vkfeedtrue,Densvin/RSSVK,ALERTua/vkfeed,Evorvian/vkfeed,KonishchevDmitry/vkfeed,flyer2001/vkrss,greengeez/vkfeed,schelkovo/rss,greengeez/vkfeed,lokineverdie/parservkrss1488,lokineverdie/parservkrss1488,antonsotin/vkfeedtrue,ByKRAK/app,Evorvian/vkfeed,schelkovo/rss,KonishchevDmitry/vkfeed,ByKRAK/app,tol1k/vkfeed,Densvin/RSSVK,antonsotin/vkfeedtrue,kostkost/vkfeed
|
# -*- coding: utf-8 -*-
'''Generates the main page.'''
import re
from google.appengine.ext import webapp
import vkfeed.util
class MainPage(webapp.RequestHandler):
'''Generates the main page.'''
def get(self):
'''Processes a GET request.'''
self.response.out.write(vkfeed.util.render_template('main.html'))
def post(self):
'''Processes a POST request.'''
profile_url = self.request.get('profile_url', '')
match = re.match(r'''^
\s*
(?:https?://(?:www\.)?(?:vk\.com|vkontakte\.ru)/)?
(?P<profile_id>[a-zA-Z0-9_-]{5,})/?
\s*
$''', profile_url, re.IGNORECASE | re.VERBOSE)
if match:
self.redirect('/feed/' + match.group('profile_id') + '/wall')
else:
self.response.out.write(vkfeed.util.render_template('main.html', {
'post_error': u'''
Неверно указан URL профиля.
Адрес должен быть вида http://vkontakte.ru/имя_профиля.
Имя профиля должно удовлетворять требованиям, предъявляемым администрацией ВКонтакте.
'''
}))
Support of old profile names
|
# -*- coding: utf-8 -*-
'''Generates the main page.'''
import re
from google.appengine.ext import webapp
import vkfeed.util
class MainPage(webapp.RequestHandler):
'''Generates the main page.'''
def get(self):
'''Processes a GET request.'''
self.response.out.write(vkfeed.util.render_template('main.html'))
def post(self):
'''Processes a POST request.'''
profile_url = self.request.get('profile_url', '')
match = re.match(r'''^
\s*
(?:https?://(?:www\.)?(?:vk\.com|vkontakte\.ru)/)?
(?P<profile_id>[a-zA-Z0-9._-]{5,})/?
\s*
$''', profile_url, re.IGNORECASE | re.VERBOSE)
if match:
self.redirect('/feed/' + match.group('profile_id') + '/wall')
else:
self.response.out.write(vkfeed.util.render_template('main.html', {
'post_error': u'''
Неверно указан URL профиля.
Адрес должен быть вида http://vkontakte.ru/имя_профиля.
Имя профиля должно удовлетворять требованиям, предъявляемым администрацией ВКонтакте.
'''
}))
|
<commit_before># -*- coding: utf-8 -*-
'''Generates the main page.'''
import re
from google.appengine.ext import webapp
import vkfeed.util
class MainPage(webapp.RequestHandler):
'''Generates the main page.'''
def get(self):
'''Processes a GET request.'''
self.response.out.write(vkfeed.util.render_template('main.html'))
def post(self):
'''Processes a POST request.'''
profile_url = self.request.get('profile_url', '')
match = re.match(r'''^
\s*
(?:https?://(?:www\.)?(?:vk\.com|vkontakte\.ru)/)?
(?P<profile_id>[a-zA-Z0-9_-]{5,})/?
\s*
$''', profile_url, re.IGNORECASE | re.VERBOSE)
if match:
self.redirect('/feed/' + match.group('profile_id') + '/wall')
else:
self.response.out.write(vkfeed.util.render_template('main.html', {
'post_error': u'''
Неверно указан URL профиля.
Адрес должен быть вида http://vkontakte.ru/имя_профиля.
Имя профиля должно удовлетворять требованиям, предъявляемым администрацией ВКонтакте.
'''
}))
<commit_msg>Support of old profile names<commit_after>
|
# -*- coding: utf-8 -*-
'''Generates the main page.'''
import re
from google.appengine.ext import webapp
import vkfeed.util
class MainPage(webapp.RequestHandler):
'''Generates the main page.'''
def get(self):
'''Processes a GET request.'''
self.response.out.write(vkfeed.util.render_template('main.html'))
def post(self):
'''Processes a POST request.'''
profile_url = self.request.get('profile_url', '')
match = re.match(r'''^
\s*
(?:https?://(?:www\.)?(?:vk\.com|vkontakte\.ru)/)?
(?P<profile_id>[a-zA-Z0-9._-]{5,})/?
\s*
$''', profile_url, re.IGNORECASE | re.VERBOSE)
if match:
self.redirect('/feed/' + match.group('profile_id') + '/wall')
else:
self.response.out.write(vkfeed.util.render_template('main.html', {
'post_error': u'''
Неверно указан URL профиля.
Адрес должен быть вида http://vkontakte.ru/имя_профиля.
Имя профиля должно удовлетворять требованиям, предъявляемым администрацией ВКонтакте.
'''
}))
|
# -*- coding: utf-8 -*-
'''Generates the main page.'''
import re
from google.appengine.ext import webapp
import vkfeed.util
class MainPage(webapp.RequestHandler):
'''Generates the main page.'''
def get(self):
'''Processes a GET request.'''
self.response.out.write(vkfeed.util.render_template('main.html'))
def post(self):
'''Processes a POST request.'''
profile_url = self.request.get('profile_url', '')
match = re.match(r'''^
\s*
(?:https?://(?:www\.)?(?:vk\.com|vkontakte\.ru)/)?
(?P<profile_id>[a-zA-Z0-9_-]{5,})/?
\s*
$''', profile_url, re.IGNORECASE | re.VERBOSE)
if match:
self.redirect('/feed/' + match.group('profile_id') + '/wall')
else:
self.response.out.write(vkfeed.util.render_template('main.html', {
'post_error': u'''
Неверно указан URL профиля.
Адрес должен быть вида http://vkontakte.ru/имя_профиля.
Имя профиля должно удовлетворять требованиям, предъявляемым администрацией ВКонтакте.
'''
}))
Support of old profile names
# -*- coding: utf-8 -*-
'''Generates the main page.'''
import re
from google.appengine.ext import webapp
import vkfeed.util
class MainPage(webapp.RequestHandler):
'''Generates the main page.'''
def get(self):
'''Processes a GET request.'''
self.response.out.write(vkfeed.util.render_template('main.html'))
def post(self):
'''Processes a POST request.'''
profile_url = self.request.get('profile_url', '')
match = re.match(r'''^
\s*
(?:https?://(?:www\.)?(?:vk\.com|vkontakte\.ru)/)?
(?P<profile_id>[a-zA-Z0-9._-]{5,})/?
\s*
$''', profile_url, re.IGNORECASE | re.VERBOSE)
if match:
self.redirect('/feed/' + match.group('profile_id') + '/wall')
else:
self.response.out.write(vkfeed.util.render_template('main.html', {
'post_error': u'''
Неверно указан URL профиля.
Адрес должен быть вида http://vkontakte.ru/имя_профиля.
Имя профиля должно удовлетворять требованиям, предъявляемым администрацией ВКонтакте.
'''
}))
|
<commit_before># -*- coding: utf-8 -*-
'''Generates the main page.'''
import re
from google.appengine.ext import webapp
import vkfeed.util
class MainPage(webapp.RequestHandler):
'''Generates the main page.'''
def get(self):
'''Processes a GET request.'''
self.response.out.write(vkfeed.util.render_template('main.html'))
def post(self):
'''Processes a POST request.'''
profile_url = self.request.get('profile_url', '')
match = re.match(r'''^
\s*
(?:https?://(?:www\.)?(?:vk\.com|vkontakte\.ru)/)?
(?P<profile_id>[a-zA-Z0-9_-]{5,})/?
\s*
$''', profile_url, re.IGNORECASE | re.VERBOSE)
if match:
self.redirect('/feed/' + match.group('profile_id') + '/wall')
else:
self.response.out.write(vkfeed.util.render_template('main.html', {
'post_error': u'''
Неверно указан URL профиля.
Адрес должен быть вида http://vkontakte.ru/имя_профиля.
Имя профиля должно удовлетворять требованиям, предъявляемым администрацией ВКонтакте.
'''
}))
<commit_msg>Support of old profile names<commit_after># -*- coding: utf-8 -*-
'''Generates the main page.'''
import re
from google.appengine.ext import webapp
import vkfeed.util
class MainPage(webapp.RequestHandler):
'''Generates the main page.'''
def get(self):
'''Processes a GET request.'''
self.response.out.write(vkfeed.util.render_template('main.html'))
def post(self):
'''Processes a POST request.'''
profile_url = self.request.get('profile_url', '')
match = re.match(r'''^
\s*
(?:https?://(?:www\.)?(?:vk\.com|vkontakte\.ru)/)?
(?P<profile_id>[a-zA-Z0-9._-]{5,})/?
\s*
$''', profile_url, re.IGNORECASE | re.VERBOSE)
if match:
self.redirect('/feed/' + match.group('profile_id') + '/wall')
else:
self.response.out.write(vkfeed.util.render_template('main.html', {
'post_error': u'''
Неверно указан URL профиля.
Адрес должен быть вида http://vkontakte.ru/имя_профиля.
Имя профиля должно удовлетворять требованиям, предъявляемым администрацией ВКонтакте.
'''
}))
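As a quick illustration of what the widened character class accepts (the profile names below are made up; this is plain re behaviour, not part of the commit itself):

import re

# Same expression as above, with '.' allowed inside the profile id.
PROFILE_RE = re.compile(r'''^
    \s*
    (?:https?://(?:www\.)?(?:vk\.com|vkontakte\.ru)/)?
    (?P<profile_id>[a-zA-Z0-9._-]{5,})/?
    \s*
    $''', re.IGNORECASE | re.VERBOSE)

# Dotted "old style" names now pass; ids shorter than five characters still do not.
assert PROFILE_RE.match('http://vk.com/some.old.name').group('profile_id') == 'some.old.name'
assert PROFILE_RE.match('abc') is None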
|
6080a7475daef38037b8e8462a7a734380179e3f
|
scripts/ensure_click_help.py
|
scripts/ensure_click_help.py
|
import ast
import argparse
import sys
def stringify_name(name: ast.AST):
if isinstance(name, ast.Attribute):
return f"{stringify_name(name.value)}.{stringify_name(name.attr)}"
if isinstance(name, ast.Name):
return name.id
if isinstance(name, str):
return name
raise NotImplementedError(f"unstringifiable name/node {name} ({type(name)})")
class EnsureClickHelpWalker(ast.NodeVisitor):
def __init__(self, add_message):
self.add_message = add_message
def visit_FunctionDef(self, node):
for deco in node.decorator_list:
self.process_decorator(deco)
def process_decorator(self, deco):
if isinstance(deco, ast.Call):
deco_name = stringify_name(deco.func)
if deco_name in ("click.option",):
kwargs = {stringify_name(kw.arg): kw.value for kw in deco.keywords}
if "help" not in kwargs:
self.add_message(deco, f"missing `help=`")
def process_file(filename):
with open(filename, "r") as infp:
tree = ast.parse(infp.read(), filename=filename)
messages = []
def add_message(node, message):
messages.append(f"{filename}:{node.lineno}: {message}")
EnsureClickHelpWalker(add_message=add_message).visit(tree)
return messages
def main():
ap = argparse.ArgumentParser()
ap.add_argument("files", metavar="FILE", nargs="*")
args = ap.parse_args()
n_messages = 0
for file in args.files:
for message in process_file(file):
print(message)
n_messages += 1
sys.exit(n_messages)
if __name__ == "__main__":
main()
|
Add script to find out click annotations missing help text
|
Add script to find out click annotations missing help text
|
Python
|
mit
|
valohai/valohai-cli
|
Add script to find out click annotations missing help text
|
import ast
import argparse
import sys
def stringify_name(name: ast.AST):
if isinstance(name, ast.Attribute):
return f"{stringify_name(name.value)}.{stringify_name(name.attr)}"
if isinstance(name, ast.Name):
return name.id
if isinstance(name, str):
return name
raise NotImplementedError(f"unstringifiable name/node {name} ({type(name)})")
class EnsureClickHelpWalker(ast.NodeVisitor):
def __init__(self, add_message):
self.add_message = add_message
def visit_FunctionDef(self, node):
for deco in node.decorator_list:
self.process_decorator(deco)
def process_decorator(self, deco):
if isinstance(deco, ast.Call):
deco_name = stringify_name(deco.func)
if deco_name in ("click.option",):
kwargs = {stringify_name(kw.arg): kw.value for kw in deco.keywords}
if "help" not in kwargs:
self.add_message(deco, f"missing `help=`")
def process_file(filename):
with open(filename, "r") as infp:
tree = ast.parse(infp.read(), filename=filename)
messages = []
def add_message(node, message):
messages.append(f"{filename}:{node.lineno}: {message}")
EnsureClickHelpWalker(add_message=add_message).visit(tree)
return messages
def main():
ap = argparse.ArgumentParser()
ap.add_argument("files", metavar="FILE", nargs="*")
args = ap.parse_args()
n_messages = 0
for file in args.files:
for message in process_file(file):
print(message)
n_messages += 1
sys.exit(n_messages)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to find out click annotations missing help text<commit_after>
|
import ast
import argparse
import sys
def stringify_name(name: ast.AST):
if isinstance(name, ast.Attribute):
return f"{stringify_name(name.value)}.{stringify_name(name.attr)}"
if isinstance(name, ast.Name):
return name.id
if isinstance(name, str):
return name
raise NotImplementedError(f"unstringifiable name/node {name} ({type(name)})")
class EnsureClickHelpWalker(ast.NodeVisitor):
def __init__(self, add_message):
self.add_message = add_message
def visit_FunctionDef(self, node):
for deco in node.decorator_list:
self.process_decorator(deco)
def process_decorator(self, deco):
if isinstance(deco, ast.Call):
deco_name = stringify_name(deco.func)
if deco_name in ("click.option",):
kwargs = {stringify_name(kw.arg): kw.value for kw in deco.keywords}
if "help" not in kwargs:
self.add_message(deco, f"missing `help=`")
def process_file(filename):
with open(filename, "r") as infp:
tree = ast.parse(infp.read(), filename=filename)
messages = []
def add_message(node, message):
messages.append(f"{filename}:{node.lineno}: {message}")
EnsureClickHelpWalker(add_message=add_message).visit(tree)
return messages
def main():
ap = argparse.ArgumentParser()
ap.add_argument("files", metavar="FILE", nargs="*")
args = ap.parse_args()
n_messages = 0
for file in args.files:
for message in process_file(file):
print(message)
n_messages += 1
sys.exit(n_messages)
if __name__ == "__main__":
main()
|
Add script to find out click annotations missing help text
import ast
import argparse
import sys
def stringify_name(name: ast.AST):
if isinstance(name, ast.Attribute):
return f"{stringify_name(name.value)}.{stringify_name(name.attr)}"
if isinstance(name, ast.Name):
return name.id
if isinstance(name, str):
return name
raise NotImplementedError(f"unstringifiable name/node {name} ({type(name)})")
class EnsureClickHelpWalker(ast.NodeVisitor):
def __init__(self, add_message):
self.add_message = add_message
def visit_FunctionDef(self, node):
for deco in node.decorator_list:
self.process_decorator(deco)
def process_decorator(self, deco):
if isinstance(deco, ast.Call):
deco_name = stringify_name(deco.func)
if deco_name in ("click.option",):
kwargs = {stringify_name(kw.arg): kw.value for kw in deco.keywords}
if "help" not in kwargs:
self.add_message(deco, f"missing `help=`")
def process_file(filename):
with open(filename, "r") as infp:
tree = ast.parse(infp.read(), filename=filename)
messages = []
def add_message(node, message):
messages.append(f"{filename}:{node.lineno}: {message}")
EnsureClickHelpWalker(add_message=add_message).visit(tree)
return messages
def main():
ap = argparse.ArgumentParser()
ap.add_argument("files", metavar="FILE", nargs="*")
args = ap.parse_args()
n_messages = 0
for file in args.files:
for message in process_file(file):
print(message)
n_messages += 1
sys.exit(n_messages)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to find out click annotations missing help text<commit_after>import ast
import argparse
import sys
def stringify_name(name: ast.AST):
if isinstance(name, ast.Attribute):
return f"{stringify_name(name.value)}.{stringify_name(name.attr)}"
if isinstance(name, ast.Name):
return name.id
if isinstance(name, str):
return name
raise NotImplementedError(f"unstringifiable name/node {name} ({type(name)})")
class EnsureClickHelpWalker(ast.NodeVisitor):
def __init__(self, add_message):
self.add_message = add_message
def visit_FunctionDef(self, node):
for deco in node.decorator_list:
self.process_decorator(deco)
def process_decorator(self, deco):
if isinstance(deco, ast.Call):
deco_name = stringify_name(deco.func)
if deco_name in ("click.option",):
kwargs = {stringify_name(kw.arg): kw.value for kw in deco.keywords}
if "help" not in kwargs:
self.add_message(deco, f"missing `help=`")
def process_file(filename):
with open(filename, "r") as infp:
tree = ast.parse(infp.read(), filename=filename)
messages = []
def add_message(node, message):
messages.append(f"{filename}:{node.lineno}: {message}")
EnsureClickHelpWalker(add_message=add_message).visit(tree)
return messages
def main():
ap = argparse.ArgumentParser()
ap.add_argument("files", metavar="FILE", nargs="*")
args = ap.parse_args()
n_messages = 0
for file in args.files:
for message in process_file(file):
print(message)
n_messages += 1
sys.exit(n_messages)
if __name__ == "__main__":
main()
|
|
a142472b86044395e428abea2fa5ff28b892d1fb
|
attr_utils.py
|
attr_utils.py
|
def _set_attr( obj, attr_name, value_to_set ):
setattr( obj, attr_name, value_to_set )
return getattr( obj, attr_name )
def _memoize_attr( obj, attr_name, value_to_set ):
return getattr( obj, attr_name, _set_attr( obj, attr_name, value_to_set ) )
|
Create attr-utils module; create attribute memoization helper
|
Create attr-utils module; create attribute memoization helper
|
Python
|
mit
|
fire-uta/iiix-data-parser
|
Create attr-utils module; create attribute memoization helper
|
def _set_attr( obj, attr_name, value_to_set ):
setattr( obj, attr_name, value_to_set )
return getattr( obj, attr_name )
def _memoize_attr( obj, attr_name, value_to_set ):
return getattr( obj, attr_name, _set_attr( obj, attr_name, value_to_set ) )
|
<commit_before><commit_msg>Create attr-utils module; create attribute memoization helper<commit_after>
|
def _set_attr( obj, attr_name, value_to_set ):
setattr( obj, attr_name, value_to_set )
return getattr( obj, attr_name )
def _memoize_attr( obj, attr_name, value_to_set ):
return getattr( obj, attr_name, _set_attr( obj, attr_name, value_to_set ) )
|
Create attr-utils module; create attribute memoization helper
def _set_attr( obj, attr_name, value_to_set ):
setattr( obj, attr_name, value_to_set )
return getattr( obj, attr_name )
def _memoize_attr( obj, attr_name, value_to_set ):
return getattr( obj, attr_name, _set_attr( obj, attr_name, value_to_set ) )
|
<commit_before><commit_msg>Create attr-utils module; create attribute memoization helper<commit_after>def _set_attr( obj, attr_name, value_to_set ):
setattr( obj, attr_name, value_to_set )
return getattr( obj, attr_name )
def _memoize_attr( obj, attr_name, value_to_set ):
return getattr( obj, attr_name, _set_attr( obj, attr_name, value_to_set ) )
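Worth noting (ordinary Python argument evaluation, not something specific to this helper): getattr's default argument is computed before the call, so _set_attr runs and re-assigns the attribute on every invocation, and the value_to_set expression is never skipped. A small demonstration:

class Holder(object):
    pass

calls = []

def build_value():
    calls.append(1)            # track how often the value is (re)built
    return 42

h = Holder()
assert _memoize_attr(h, '_answer', build_value()) == 42
assert _memoize_attr(h, '_answer', build_value()) == 42
assert len(calls) == 2         # built twice: only the attribute lookup is cached, not the computation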
|
|
72ffe438270fb54a1ce163faff6d2083079c77e2
|
anydo/lib/tests/test_utils.py
|
anydo/lib/tests/test_utils.py
|
# -*- coding: utf-8 -*-
import unittest
import re
import sys
import os.path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from anydo.lib import utils
class UtilsTests(unittest.TestCase):
def setUp(self):
self.pattern = re.compile('(^([\w-]+)==$)', flags=re.U)
def test_create_uuid(self):
self.assertTrue(self.pattern.match(utils.create_uuid()))
def test_encode_string(self):
self.assertEqual(utils.encode_string('test'), 'test')
self.assertEqual(utils.encode_string('1234'), '1234')
self.assertEqual(utils.encode_string('test1234 Äë'), 'test1234 Äë')
# "テスト" means "test" in Japansese.
if sys.version_info < (3, 0):
word = ('\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88 123eA'
'\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88')
else:
word = 'テスト 123eAテスト'
self.assertEqual(utils.encode_string('テスト 123eAテスト'), word)
|
Add unit test of utils module.
|
Add unit test of utils module.
Signed-off-by: Kouhei Maeda <c9f1823971fa1a4c79cdb50b3311094021cee31e@palmtb.net>
|
Python
|
mit
|
gvkalra/python-anydo,gvkalra/python-anydo
|
Add unit test of utils module.
Signed-off-by: Kouhei Maeda <c9f1823971fa1a4c79cdb50b3311094021cee31e@palmtb.net>
|
# -*- coding: utf-8 -*-
import unittest
import re
import sys
import os.path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from anydo.lib import utils
class UtilsTests(unittest.TestCase):
def setUp(self):
self.pattern = re.compile('(^([\w-]+)==$)', flags=re.U)
def test_create_uuid(self):
self.assertTrue(self.pattern.match(utils.create_uuid()))
def test_encode_string(self):
self.assertEqual(utils.encode_string('test'), 'test')
self.assertEqual(utils.encode_string('1234'), '1234')
self.assertEqual(utils.encode_string('test1234 Äë'), 'test1234 Äë')
# "テスト" means "test" in Japansese.
if sys.version_info < (3, 0):
word = ('\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88 123eA'
'\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88')
else:
word = 'テスト 123eAテスト'
self.assertEqual(utils.encode_string('テスト 123eAテスト'), word)
|
<commit_before><commit_msg>Add unit test of utils module.
Signed-off-by: Kouhei Maeda <c9f1823971fa1a4c79cdb50b3311094021cee31e@palmtb.net><commit_after>
|
# -*- coding: utf-8 -*-
import unittest
import re
import sys
import os.path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from anydo.lib import utils
class UtilsTests(unittest.TestCase):
def setUp(self):
self.pattern = re.compile('(^([\w-]+)==$)', flags=re.U)
def test_create_uuid(self):
self.assertTrue(self.pattern.match(utils.create_uuid()))
def test_encode_string(self):
self.assertEqual(utils.encode_string('test'), 'test')
self.assertEqual(utils.encode_string('1234'), '1234')
self.assertEqual(utils.encode_string('test1234 Äë'), 'test1234 Äë')
# "テスト" means "test" in Japansese.
if sys.version_info < (3, 0):
word = ('\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88 123eA'
'\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88')
else:
word = 'テスト 123eAテスト'
self.assertEqual(utils.encode_string('テスト 123eAテスト'), word)
|
Add unit test of utils module.
Signed-off-by: Kouhei Maeda <c9f1823971fa1a4c79cdb50b3311094021cee31e@palmtb.net>
# -*- coding: utf-8 -*-
import unittest
import re
import sys
import os.path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from anydo.lib import utils
class UtilsTests(unittest.TestCase):
def setUp(self):
self.pattern = re.compile('(^([\w-]+)==$)', flags=re.U)
def test_create_uuid(self):
self.assertTrue(self.pattern.match(utils.create_uuid()))
def test_encode_string(self):
self.assertEqual(utils.encode_string('test'), 'test')
self.assertEqual(utils.encode_string('1234'), '1234')
self.assertEqual(utils.encode_string('test1234 Äë'), 'test1234 Äë')
# "テスト" means "test" in Japansese.
if sys.version_info < (3, 0):
word = ('\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88 123eA'
'\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88')
else:
word = 'テスト 123eAテスト'
self.assertEqual(utils.encode_string('テスト 123eAテスト'), word)
|
<commit_before><commit_msg>Add unit test of utils module.
Signed-off-by: Kouhei Maeda <c9f1823971fa1a4c79cdb50b3311094021cee31e@palmtb.net><commit_after># -*- coding: utf-8 -*-
import unittest
import re
import sys
import os.path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from anydo.lib import utils
class UtilsTests(unittest.TestCase):
def setUp(self):
self.pattern = re.compile('(^([\w-]+)==$)', flags=re.U)
def test_create_uuid(self):
self.assertTrue(self.pattern.match(utils.create_uuid()))
def test_encode_string(self):
self.assertEqual(utils.encode_string('test'), 'test')
self.assertEqual(utils.encode_string('1234'), '1234')
self.assertEqual(utils.encode_string('test1234 Äë'), 'test1234 Äë')
# "テスト" means "test" in Japansese.
if sys.version_info < (3, 0):
word = ('\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88 123eA'
'\xe3\x83\x86\xe3\x82\xb9\xe3\x83\x88')
else:
word = 'テスト 123eAテスト'
self.assertEqual(utils.encode_string('テスト 123eAテスト'), word)
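The `(^([\w-]+)==$)` pattern in setUp implies create_uuid() returns a URL-safe base64 string with its trailing padding, e.g. a 16-byte UUID encoded to 24 characters. A possible shape, stated purely as an assumption about the interface (not taken from the anydo source):

import base64
import uuid

def create_uuid_sketch():
    # 16 random bytes -> 24 URL-safe base64 characters, always ending in '=='
    return base64.urlsafe_b64encode(uuid.uuid4().bytes).decode('ascii')

value = create_uuid_sketch()
assert len(value) == 24 and value.endswith('==')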
|
|
36a2f0de9f525ea030e4cc805b8ccc7eb29c8098
|
src/vimapt/library/vimapt/Dependency.py
|
src/vimapt/library/vimapt/Dependency.py
|
import networkx as nx
class Dependency(object):
def __init__(self, package_name):
self.package_name = package_name
self.dependency_graph = nx.DiGraph()
self.dependency_graph.add_node(self.package_name)
self.top_node_name = self.package_name
def parse(self, dependency_specification):
node_name = self.get_node_name(dependency_specification)
dependency_specification_list = self.get_dependency_specification_list(dependency_specification)
for child_dependency_specification in dependency_specification_list:
child_node_name = self.get_node_name(child_dependency_specification)
self.dependency_graph.add_node(child_node_name)
self.dependency_graph.add_edge(node_name, child_node_name)
self.parse(child_dependency_specification)
def get_dependency_specification_list(self, dependency_specification):
return []
def get_node_name(self, dependency_specification):
return []
|
Add dependency function (not finished yet)
|
Add dependency function (not finished yet)
|
Python
|
mit
|
howl-anderson/vimapt,howl-anderson/vimapt
|
Add dependency function (not finished yet)
|
import networkx as nx
class Dependency(object):
def __init__(self, package_name):
self.package_name = package_name
self.dependency_graph = nx.DiGraph()
self.dependency_graph.add_node(self.package_name)
self.top_node_name = self.package_name
def parse(self, dependency_specification):
node_name = self.get_node_name(dependency_specification)
dependency_specification_list = self.get_dependency_specification_list(dependency_specification)
for child_dependency_specification in dependency_specification_list:
child_node_name = self.get_node_name(child_dependency_specification)
self.dependency_graph.add_node(child_node_name)
self.dependency_graph.add_edge(node_name, child_node_name)
self.parse(child_dependency_specification)
def get_dependency_specification_list(self, dependency_specification):
return []
def get_node_name(self, dependency_specification):
return []
|
<commit_before><commit_msg>Add dependency function (not finished yet)<commit_after>
|
import networkx as nx
class Dependency(object):
def __init__(self, package_name):
self.package_name = package_name
self.dependency_graph = nx.DiGraph()
self.dependency_graph.add_node(self.package_name)
self.top_node_name = self.package_name
def parse(self, dependency_specification):
node_name = self.get_node_name(dependency_specification)
dependency_specification_list = self.get_dependency_specification_list(dependency_specification)
for child_dependency_specification in dependency_specification_list:
child_node_name = self.get_node_name(child_dependency_specification)
self.dependency_graph.add_node(child_node_name)
self.dependency_graph.add_edge(node_name, child_node_name)
self.parse(child_dependency_specification)
def get_dependency_specification_list(self, dependency_specification):
return []
def get_node_name(self, dependency_specification):
return []
|
Add dependency function (not finished yet)
import networkx as nx
class Dependency(object):
def __init__(self, package_name):
self.package_name = package_name
self.dependency_graph = nx.DiGraph()
self.dependency_graph.add_node(self.package_name)
self.top_node_name = self.package_name
def parse(self, dependency_specification):
node_name = self.get_node_name(dependency_specification)
dependency_specification_list = self.get_dependency_specification_list(dependency_specification)
for child_dependency_specification in dependency_specification_list:
child_node_name = self.get_node_name(child_dependency_specification)
self.dependency_graph.add_node(child_node_name)
self.dependency_graph.add_edge(node_name, child_node_name)
self.parse(child_dependency_specification)
def get_dependency_specification_list(self, dependency_specification):
return []
def get_node_name(self, dependency_specification):
return []
|
<commit_before><commit_msg>Add dependency function (not finished yet)<commit_after>import networkx as nx
class Dependency(object):
def __init__(self, package_name):
self.package_name = package_name
self.dependency_graph = nx.DiGraph()
self.dependency_graph.add_node(self.package_name)
self.top_node_name = self.package_name
def parse(self, dependency_specification):
node_name = self.get_node_name(dependency_specification)
dependency_specification_list = self.get_dependency_specification_list(dependency_specification)
for child_dependency_specification in dependency_specification_list:
child_node_name = self.get_node_name(child_dependency_specification)
self.dependency_graph.add_node(child_node_name)
self.dependency_graph.add_edge(node_name, child_node_name)
self.parse(child_dependency_specification)
def get_dependency_specification_list(self, dependency_specification):
return []
def get_node_name(self, dependency_specification):
return []
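Because both getters are still stubs, here is one way a concrete subclass could feed the graph; the nested-dict specification format is invented for illustration, and `nx` is the module's own networkx import:

class DictDependency(Dependency):
    # dependency_specification: {'name': 'pkg', 'depends': [<same shape>, ...]}
    def get_node_name(self, dependency_specification):
        return dependency_specification['name']

    def get_dependency_specification_list(self, dependency_specification):
        return dependency_specification.get('depends', [])

spec = {'name': 'vim-foo', 'depends': [
    {'name': 'vim-bar', 'depends': []},
    {'name': 'vim-baz', 'depends': [{'name': 'vim-bar', 'depends': []}]},
]}

dep = DictDependency('vim-foo')
dep.parse(spec)
# Reversed topological order is a workable install order (dependencies first).
print(list(reversed(list(nx.topological_sort(dep.dependency_graph)))))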
|
|
0a0a9addea16d5adf4d9edb3d56e1d890b6214e5
|
st2common/tests/unit/test_db_fields.py
|
st2common/tests/unit/test_db_fields.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import datetime
import unittest2
from st2common.fields import ComplexDateTimeField
class ComplexDateTimeFieldTestCase(unittest2.TestCase):
def test_round_trip_conversion(self):
datetime_values = [
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=500),
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=0),
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=999999)
]
microsecond_values = []
# Calculate microsecond values
for value in datetime_values:
seconds = time.mktime(value.timetuple())
microseconds_reminder = value.time().microsecond
result = int(seconds * 1000000) + microseconds_reminder
microsecond_values.append(result)
field = ComplexDateTimeField()
# datetime to us
for index, value in enumerate(datetime_values):
actual_value = field._datetime_to_microseconds_since_epoch(value=value)
expected_value = microsecond_values[index]
expected_microseconds = value.time().microsecond
self.assertEqual(actual_value, expected_value)
self.assertTrue(str(actual_value).endswith(str(expected_microseconds)))
# us to datetime
for index, value in enumerate(microsecond_values):
actual_value = field._microseconds_since_epoch_to_datetime(data=value)
expected_value = datetime_values[index]
self.assertEqual(actual_value, expected_value)
|
Add tests for new complex date time field.
|
Add tests for new complex date time field.
|
Python
|
apache-2.0
|
nzlosh/st2,punalpatel/st2,pixelrebel/st2,dennybaa/st2,StackStorm/st2,lakshmi-kannan/st2,nzlosh/st2,jtopjian/st2,peak6/st2,pinterb/st2,Plexxi/st2,tonybaloney/st2,jtopjian/st2,grengojbo/st2,emedvedev/st2,dennybaa/st2,armab/st2,StackStorm/st2,dennybaa/st2,tonybaloney/st2,jtopjian/st2,tonybaloney/st2,armab/st2,pixelrebel/st2,lakshmi-kannan/st2,pixelrebel/st2,emedvedev/st2,Plexxi/st2,Itxaka/st2,emedvedev/st2,nzlosh/st2,StackStorm/st2,alfasin/st2,grengojbo/st2,pinterb/st2,nzlosh/st2,Plexxi/st2,peak6/st2,peak6/st2,pinterb/st2,punalpatel/st2,grengojbo/st2,StackStorm/st2,lakshmi-kannan/st2,Itxaka/st2,Itxaka/st2,alfasin/st2,alfasin/st2,armab/st2,punalpatel/st2,Plexxi/st2
|
Add tests for new complex date time field.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import datetime
import unittest2
from st2common.fields import ComplexDateTimeField
class ComplexDateTimeFieldTestCase(unittest2.TestCase):
def test_round_trip_conversion(self):
datetime_values = [
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=500),
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=0),
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=999999)
]
microsecond_values = []
# Calculate microsecond values
for value in datetime_values:
seconds = time.mktime(value.timetuple())
microseconds_reminder = value.time().microsecond
result = int(seconds * 1000000) + microseconds_reminder
microsecond_values.append(result)
field = ComplexDateTimeField()
# datetime to us
for index, value in enumerate(datetime_values):
actual_value = field._datetime_to_microseconds_since_epoch(value=value)
expected_value = microsecond_values[index]
expected_microseconds = value.time().microsecond
self.assertEqual(actual_value, expected_value)
self.assertTrue(str(actual_value).endswith(str(expected_microseconds)))
# us to datetime
for index, value in enumerate(microsecond_values):
actual_value = field._microseconds_since_epoch_to_datetime(data=value)
expected_value = datetime_values[index]
self.assertEqual(actual_value, expected_value)
|
<commit_before><commit_msg>Add tests for new complex date time field.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import datetime
import unittest2
from st2common.fields import ComplexDateTimeField
class ComplexDateTimeFieldTestCase(unittest2.TestCase):
def test_round_trip_conversion(self):
datetime_values = [
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=500),
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=0),
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=999999)
]
microsecond_values = []
# Calculate microsecond values
for value in datetime_values:
seconds = time.mktime(value.timetuple())
microseconds_reminder = value.time().microsecond
result = int(seconds * 1000000) + microseconds_reminder
microsecond_values.append(result)
field = ComplexDateTimeField()
# datetime to us
for index, value in enumerate(datetime_values):
actual_value = field._datetime_to_microseconds_since_epoch(value=value)
expected_value = microsecond_values[index]
expected_microseconds = value.time().microsecond
self.assertEqual(actual_value, expected_value)
self.assertTrue(str(actual_value).endswith(str(expected_microseconds)))
# us to datetime
for index, value in enumerate(microsecond_values):
actual_value = field._microseconds_since_epoch_to_datetime(data=value)
expected_value = datetime_values[index]
self.assertEqual(actual_value, expected_value)
|
Add tests for new complex date time field.
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import datetime
import unittest2
from st2common.fields import ComplexDateTimeField
class ComplexDateTimeFieldTestCase(unittest2.TestCase):
def test_round_trip_conversion(self):
datetime_values = [
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=500),
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=0),
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=999999)
]
microsecond_values = []
# Calculate microsecond values
for value in datetime_values:
seconds = time.mktime(value.timetuple())
microseconds_reminder = value.time().microsecond
result = int(seconds * 1000000) + microseconds_reminder
microsecond_values.append(result)
field = ComplexDateTimeField()
# datetime to us
for index, value in enumerate(datetime_values):
actual_value = field._datetime_to_microseconds_since_epoch(value=value)
expected_value = microsecond_values[index]
expected_microseconds = value.time().microsecond
self.assertEqual(actual_value, expected_value)
self.assertTrue(str(actual_value).endswith(str(expected_microseconds)))
# us to datetime
for index, value in enumerate(microsecond_values):
actual_value = field._microseconds_since_epoch_to_datetime(data=value)
expected_value = datetime_values[index]
self.assertEqual(actual_value, expected_value)
|
<commit_before><commit_msg>Add tests for new complex date time field.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import datetime
import unittest2
from st2common.fields import ComplexDateTimeField
class ComplexDateTimeFieldTestCase(unittest2.TestCase):
def test_round_trip_conversion(self):
datetime_values = [
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=500),
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=0),
datetime.datetime(2015, 1, 1, 15, 0, 0).replace(microsecond=999999)
]
microsecond_values = []
# Calculate microsecond values
for value in datetime_values:
seconds = time.mktime(value.timetuple())
microseconds_reminder = value.time().microsecond
result = int(seconds * 1000000) + microseconds_reminder
microsecond_values.append(result)
field = ComplexDateTimeField()
# datetime to us
for index, value in enumerate(datetime_values):
actual_value = field._datetime_to_microseconds_since_epoch(value=value)
expected_value = microsecond_values[index]
expected_microseconds = value.time().microsecond
self.assertEqual(actual_value, expected_value)
self.assertTrue(str(actual_value).endswith(str(expected_microseconds)))
# us to datetime
for index, value in enumerate(microsecond_values):
actual_value = field._microseconds_since_epoch_to_datetime(data=value)
expected_value = datetime_values[index]
self.assertEqual(actual_value, expected_value)
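The round trip the test checks reduces to simple arithmetic: local seconds since the epoch scaled to microseconds plus the sub-second part, then divmod on the way back. A compact sketch mirroring the test's math (not necessarily the field's exact implementation):

import time
import datetime

def to_microseconds(value):
    # whole seconds in local time, scaled, plus the microsecond component
    return int(time.mktime(value.timetuple()) * 1000000) + value.microsecond

def from_microseconds(data):
    seconds, microseconds = divmod(data, 1000000)
    return datetime.datetime.fromtimestamp(seconds).replace(microsecond=microseconds)

original = datetime.datetime(2015, 1, 1, 15, 0, 0, 999999)
assert from_microseconds(to_microseconds(original)) == original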
|
|
1b6966cf0e90da0ac060a43349bbe4ce0f5fc365
|
src/whitelist/whitelist_form.py
|
src/whitelist/whitelist_form.py
|
from django.forms import ModelForm
from whitelist.models import Player
class WhitelistForm(ModelForm):
""" Automatically generate a form based on the Player model
"""
class Meta:
model = Player
fields = ('ign', 'email')
|
Create a model-based form for whitelist requests
|
Create a model-based form for whitelist requests
|
Python
|
mit
|
Jonpro03/Minecrunch_Web,Jonpro03/Minecrunch_Web,Jonpro03/Minecrunch_Web
|
Create a model-based form for whitelist requests
|
from django.forms import ModelForm
from whitelist.models import Player
class WhitelistForm(ModelForm):
""" Automatically generate a form based on the Player model
"""
class Meta:
model = Player
fields = ('ign', 'email')
|
<commit_before><commit_msg>Create a model-based form for whitelist requests<commit_after>
|
from django.forms import ModelForm
from whitelist.models import Player
class WhitelistForm(ModelForm):
""" Automatically generate a form based on the Player model
"""
class Meta:
model = Player
fields = ('ign', 'email')
|
Create a model-based form for whitelist requests
from django.forms import ModelForm
from whitelist.models import Player
class WhitelistForm(ModelForm):
""" Automatically generate a form based on the Player model
"""
class Meta:
model = Player
fields = ('ign', 'email')
|
<commit_before><commit_msg>Create a model-based form for whitelist requests<commit_after>from django.forms import ModelForm
from whitelist.models import Player
class WhitelistForm(ModelForm):
""" Automatically generate a form based on the Player model
"""
class Meta:
model = Player
fields = ('ign', 'email')
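Typical use of such a ModelForm in a view follows the standard Django pattern below (the view function and template name are illustrative, not part of the commit):

from django.shortcuts import redirect, render

def request_whitelist(request):
    if request.method == 'POST':
        form = WhitelistForm(request.POST)
        if form.is_valid():
            form.save()          # creates a Player row from the submitted ign + email
            return redirect('/')
    else:
        form = WhitelistForm()
    return render(request, 'whitelist/request.html', {'form': form})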
|
|
e78e93d5f2a3c35e92b03ba85f0cdad9a5f893d2
|
affiliate-builder/upload_bdists.py
|
affiliate-builder/upload_bdists.py
|
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import os
import glob
from conda import config
#from conda_build.metadata import MetaData
from binstar_client.inspect_package.conda import inspect_conda_package
from obvci.conda_tools.build import upload
from obvci.conda_tools.build_directory import Builder
from prepare_packages import RECIPE_FOLDER, BINSTAR_CHANNEL, BDIST_CONDA_FOLDER
def main():
# Get our binstar client from the Builder to get BINSTAR_TOKEN obfuscation
# in windows builds.
builder = Builder(RECIPE_FOLDER, BINSTAR_CHANNEL, 'main')
bdists = os.listdir(BDIST_CONDA_FOLDER)
conda_builds_dir = os.path.join(config.default_prefix,
'conda-bld', config.subdir)
built_packages = glob.glob(os.path.join(conda_builds_dir, '*.tar.bz2'))
for package in built_packages:
_, package_file = os.path.split(package)
name = package_file.split('-')[0]
if name in bdists:
# Need to upload this one...
# First grab the metadata from the package, which requires
# opening the file.
# SKIP THIS VESTIGAL CRAP
# with open(package) as f:
# package_data, release, file_data = inspect_conda_package(package, f)
# print(package_data, release, file_data)
# #package_data.update({'version': release['version']})
# package_data.update(release)
# package_data.update({'build': {'string': file_data['attrs']['build']}})
# package_data.update(file_data)
# meta = MetaData.fromdict({'package': package_data})
# meta.check_fields()
# print(meta)
# print('DIST:', meta.dist())
# RESUME READING HERE
# Not going to lie: after fighting with conda for 90 minutes to
# construct a proper MetaData object from a built package, I give
# up.
# Instead, create an object with one method, dist, which returns
# the build string and be done with it.
class MetaData(object):
def __init__(self, dist_info):
self._dist_info = dist_info
def dist(self):
return self._dist_info
meta = MetaData(package_file.split('.tar.bz2')[0])
# Upload it
upload(builder.binstar_cli, meta, BINSTAR_CHANNEL)
if __name__ == '__main__':
main()
|
Add script for uploading bdists
|
Add script for uploading bdists
|
Python
|
bsd-3-clause
|
bmorris3/conda-builder-affiliated,Cadair/conda-builder-affiliated,astropy/conda-build-tools,astropy/conda-builder-affiliated,cdeil/conda-builder-affiliated,astropy/conda-build-tools,cdeil/conda-builder-affiliated,kbarbary/conda-builder-affiliated,mwcraig/conda-builder-affiliated,astropy/conda-builder-affiliated,kbarbary/conda-builder-affiliated,zblz/conda-builder-affiliated,Cadair/conda-builder-affiliated,bmorris3/conda-builder-affiliated,zblz/conda-builder-affiliated,mwcraig/conda-builder-affiliated
|
Add script for uploading bdists
|
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import os
import glob
from conda import config
#from conda_build.metadata import MetaData
from binstar_client.inspect_package.conda import inspect_conda_package
from obvci.conda_tools.build import upload
from obvci.conda_tools.build_directory import Builder
from prepare_packages import RECIPE_FOLDER, BINSTAR_CHANNEL, BDIST_CONDA_FOLDER
def main():
# Get our binstar client from the Builder to get BINSTAR_TOKEN obfuscation
# in windows builds.
builder = Builder(RECIPE_FOLDER, BINSTAR_CHANNEL, 'main')
bdists = os.listdir(BDIST_CONDA_FOLDER)
conda_builds_dir = os.path.join(config.default_prefix,
'conda-bld', config.subdir)
built_packages = glob.glob(os.path.join(conda_builds_dir, '*.tar.bz2'))
for package in built_packages:
_, package_file = os.path.split(package)
name = package_file.split('-')[0]
if name in bdists:
# Need to upload this one...
# First grab the metadata from the package, which requires
# opening the file.
# SKIP THIS VESTIGAL CRAP
# with open(package) as f:
# package_data, release, file_data = inspect_conda_package(package, f)
# print(package_data, release, file_data)
# #package_data.update({'version': release['version']})
# package_data.update(release)
# package_data.update({'build': {'string': file_data['attrs']['build']}})
# package_data.update(file_data)
# meta = MetaData.fromdict({'package': package_data})
# meta.check_fields()
# print(meta)
# print('DIST:', meta.dist())
# RESUME READING HERE
# Not going to lie: after fighting with conda for 90 minutes to
# construct a proper MetaData object from a built package, I give
# up.
# Instead, create an object with one method, dist, which returns
# the build string and be done with it.
class MetaData(object):
def __init__(self, dist_info):
self._dist_info = dist_info
def dist(self):
return self._dist_info
meta = MetaData(package_file.split('.tar.bz2')[0])
# Upload it
upload(builder.binstar_cli, meta, BINSTAR_CHANNEL)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for uploading bdists<commit_after>
|
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import os
import glob
from conda import config
#from conda_build.metadata import MetaData
from binstar_client.inspect_package.conda import inspect_conda_package
from obvci.conda_tools.build import upload
from obvci.conda_tools.build_directory import Builder
from prepare_packages import RECIPE_FOLDER, BINSTAR_CHANNEL, BDIST_CONDA_FOLDER
def main():
# Get our binstar client from the Builder to get BINSTAR_TOKEN obfuscation
# in windows builds.
builder = Builder(RECIPE_FOLDER, BINSTAR_CHANNEL, 'main')
bdists = os.listdir(BDIST_CONDA_FOLDER)
conda_builds_dir = os.path.join(config.default_prefix,
'conda-bld', config.subdir)
built_packages = glob.glob(os.path.join(conda_builds_dir, '*.tar.bz2'))
for package in built_packages:
_, package_file = os.path.split(package)
name = package_file.split('-')[0]
if name in bdists:
# Need to upload this one...
# First grab the metadata from the package, which requires
# opening the file.
# SKIP THIS VESTIGAL CRAP
# with open(package) as f:
# package_data, release, file_data = inspect_conda_package(package, f)
# print(package_data, release, file_data)
# #package_data.update({'version': release['version']})
# package_data.update(release)
# package_data.update({'build': {'string': file_data['attrs']['build']}})
# package_data.update(file_data)
# meta = MetaData.fromdict({'package': package_data})
# meta.check_fields()
# print(meta)
# print('DIST:', meta.dist())
# RESUME READING HERE
# Not going to lie: after fighting with conda for 90 minutes to
# construct a proper MetaData object from a built package, I give
# up.
# Instead, create an object with one method, dist, which returns
# the build string and be done with it.
class MetaData(object):
def __init__(self, dist_info):
self._dist_info = dist_info
def dist(self):
return self._dist_info
meta = MetaData(package_file.split('.tar.bz2')[0])
# Upload it
upload(builder.binstar_cli, meta, BINSTAR_CHANNEL)
if __name__ == '__main__':
main()
|
Add script for uploading bdists
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import os
import glob
from conda import config
#from conda_build.metadata import MetaData
from binstar_client.inspect_package.conda import inspect_conda_package
from obvci.conda_tools.build import upload
from obvci.conda_tools.build_directory import Builder
from prepare_packages import RECIPE_FOLDER, BINSTAR_CHANNEL, BDIST_CONDA_FOLDER
def main():
# Get our binstar client from the Builder to get BINSTAR_TOKEN obfuscation
# in windows builds.
builder = Builder(RECIPE_FOLDER, BINSTAR_CHANNEL, 'main')
bdists = os.listdir(BDIST_CONDA_FOLDER)
conda_builds_dir = os.path.join(config.default_prefix,
'conda-bld', config.subdir)
built_packages = glob.glob(os.path.join(conda_builds_dir, '*.tar.bz2'))
for package in built_packages:
_, package_file = os.path.split(package)
name = package_file.split('-')[0]
if name in bdists:
# Need to upload this one...
# First grab the metadata from the package, which requires
# opening the file.
# SKIP THIS VESTIGAL CRAP
# with open(package) as f:
# package_data, release, file_data = inspect_conda_package(package, f)
# print(package_data, release, file_data)
# #package_data.update({'version': release['version']})
# package_data.update(release)
# package_data.update({'build': {'string': file_data['attrs']['build']}})
# package_data.update(file_data)
# meta = MetaData.fromdict({'package': package_data})
# meta.check_fields()
# print(meta)
# print('DIST:', meta.dist())
# RESUME READING HERE
# Not going to lie: after fighting with conda for 90 minutes to
# construct a proper MetaData object from a built package, I give
# up.
# Instead, create an object with one method, dist, which returns
# the build string and be done with it.
class MetaData(object):
def __init__(self, dist_info):
self._dist_info = dist_info
def dist(self):
return self._dist_info
meta = MetaData(package_file.split('.tar.bz2')[0])
# Upload it
upload(builder.binstar_cli, meta, BINSTAR_CHANNEL)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for uploading bdists<commit_after>from __future__ import (division, print_function, absolute_import,
unicode_literals)
import os
import glob
from conda import config
#from conda_build.metadata import MetaData
from binstar_client.inspect_package.conda import inspect_conda_package
from obvci.conda_tools.build import upload
from obvci.conda_tools.build_directory import Builder
from prepare_packages import RECIPE_FOLDER, BINSTAR_CHANNEL, BDIST_CONDA_FOLDER
def main():
# Get our binstar client from the Builder to get BINSTAR_TOKEN obfuscation
# in windows builds.
builder = Builder(RECIPE_FOLDER, BINSTAR_CHANNEL, 'main')
bdists = os.listdir(BDIST_CONDA_FOLDER)
conda_builds_dir = os.path.join(config.default_prefix,
'conda-bld', config.subdir)
built_packages = glob.glob(os.path.join(conda_builds_dir, '*.tar.bz2'))
for package in built_packages:
_, package_file = os.path.split(package)
name = package_file.split('-')[0]
if name in bdists:
# Need to upload this one...
# First grab the metadata from the package, which requires
# opening the file.
# SKIP THIS VESTIGAL CRAP
# with open(package) as f:
# package_data, release, file_data = inspect_conda_package(package, f)
# print(package_data, release, file_data)
# #package_data.update({'version': release['version']})
# package_data.update(release)
# package_data.update({'build': {'string': file_data['attrs']['build']}})
# package_data.update(file_data)
# meta = MetaData.fromdict({'package': package_data})
# meta.check_fields()
# print(meta)
# print('DIST:', meta.dist())
# RESUME READING HERE
# Not going to lie: after fighting with conda for 90 minutes to
# construct a proper MetaData object from a built package, I give
# up.
# Instead, create an object with one method, dist, which returns
# the build string and be done with it.
class MetaData(object):
def __init__(self, dist_info):
self._dist_info = dist_info
def dist(self):
return self._dist_info
meta = MetaData(package_file.split('.tar.bz2')[0])
# Upload it
upload(builder.binstar_cli, meta, BINSTAR_CHANNEL)
if __name__ == '__main__':
main()
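The name/dist bookkeeping above leans only on conda's `<name>-<version>-<build>.tar.bz2` file naming; for instance (the package file name is made up):

package_file = 'astropy-1.0.3-np19py27_0.tar.bz2'

name = package_file.split('-')[0]          # 'astropy' -> compared against the bdist folder listing
dist = package_file.split('.tar.bz2')[0]   # 'astropy-1.0.3-np19py27_0' -> what the stand-in MetaData.dist() returns
assert (name, dist) == ('astropy', 'astropy-1.0.3-np19py27_0')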
|
|
504e2321a001144d5466cb492c77f01e045c89d5
|
test/tests/python-imports/container.py
|
test/tests/python-imports/container.py
|
import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
|
import curses
import dbm
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
|
Add "dbm" to "python-imports" test
|
Add "dbm" to "python-imports" test
|
Python
|
apache-2.0
|
emilevauge/official-images,chorrell/official-images,docker-solr/official-images,davidl-zend/official-images,jperrin/official-images,31z4/official-images,nodejs-docker-bot/official-images,thresheek/official-images,robfrank/official-images,dinogun/official-images,pesho/docker-official-images,infosiftr/stackbrew,neo-technology/docker-official-images,mattrobenolt/official-images,docker-flink/official-images,docker-library/official-images
|
import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
Add "dbm" to "python-imports" test
|
import curses
import dbm
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
|
<commit_before>import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
<commit_msg>Add "dbm" to "python-imports" test<commit_after>
|
import curses
import dbm
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
|
import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
Add "dbm" to "python-imports" testimport curses
import dbm
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
|
<commit_before>import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
<commit_msg>Add "dbm" to "python-imports" test<commit_after>import curses
import dbm
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
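The new dbm line is an import check only; a functional probe in the same spirit as the bz2/zlib asserts could look like this under Python 3 (file location chosen arbitrarily):

import dbm
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'probe')
with dbm.open(path, 'c') as db:      # 'c' creates the database if it does not exist
    db[b'it'] = b'works'
with dbm.open(path, 'r') as db:
    assert db[b'it'] == b'works'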
|
df10bd85bcf94aba9661e397960a1cd3b4dd090d
|
POST_TEST.py
|
POST_TEST.py
|
from posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
Test for ISO NC creator
|
Test for ISO NC creator
|
Python
|
bsd-3-clause
|
danheeks/heekscnc,danheeks/heekscnc,danheeks/heekscnc,danheeks/heekscnc
|
Test for ISO NC creator
|
from posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
<commit_before><commit_msg>Test for ISO NC creator<commit_after>
|
from posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
Test for ISO NC creator
from posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
<commit_before><commit_msg>Test for ISO NC creator<commit_after>from posts.nc import *
import posts.iso
output('POST_TEST.txt')
program_begin(123, 'Test program')
absolute()
metric()
set_plane(0)
feedrate(420)
rapid(100,120)
rapid(z=50)
feed(z=0)
rapid(z=50)
rapid_home()
program_end()
|
|
48984b9ab60c14c5d47ee329ef906d47b35b8ade
|
biothings/tests/test_query.py
|
biothings/tests/test_query.py
|
'''
Biothings Query Component Common Tests
'''
import os
from nose.core import main
from biothings.tests import BiothingsTestCase
class QueryTests(BiothingsTestCase):
''' Test against server specified in environment variable BT_HOST
and BT_API or MyGene.info production server V3 by default '''
__test__ = True
host = os.getenv("BT_HOST", "http://mygene.info")
api = os.getenv("BT_API", "/v3")
def test_01(self):
''' KWARGS CTRL Format Json '''
self.query(q='*', size='1')
def test_02(self):
''' KWARGS CTRL Format Yaml '''
res = self.request('query?q=*&size=1&out_format=yaml').text
assert res.startswith('max_score:')
def test_03(self):
''' KWARGS CTRL Format Html '''
res = self.request('query?q=*&size=1&out_format=html').text
assert '<html>' in res
def test_04(self):
''' KWARGS CTRL Format Msgpack '''
res = self.request('query?q=*&size=1&out_format=msgpack').content
self.msgpack_ok(res)
if __name__ == '__main__':
main(defaultTest='__main__.QueryTests', argv=['', '-v'])
|
Add common tests for query endpoint
|
Add common tests for query endpoint
|
Python
|
apache-2.0
|
biothings/biothings.api,biothings/biothings.api
|
Add common tests for query endpoint
|
'''
Biothings Query Component Common Tests
'''
import os
from nose.core import main
from biothings.tests import BiothingsTestCase
class QueryTests(BiothingsTestCase):
''' Test against server specified in environment variable BT_HOST
and BT_API or MyGene.info production server V3 by default '''
__test__ = True
host = os.getenv("BT_HOST", "http://mygene.info")
api = os.getenv("BT_API", "/v3")
def test_01(self):
''' KWARGS CTRL Format Json '''
self.query(q='*', size='1')
def test_02(self):
''' KWARGS CTRL Format Yaml '''
res = self.request('query?q=*&size=1&out_format=yaml').text
assert res.startswith('max_score:')
def test_03(self):
''' KWARGS CTRL Format Html '''
res = self.request('query?q=*&size=1&out_format=html').text
assert '<html>' in res
def test_04(self):
''' KWARGS CTRL Format Msgpack '''
res = self.request('query?q=*&size=1&out_format=msgpack').content
self.msgpack_ok(res)
if __name__ == '__main__':
main(defaultTest='__main__.QueryTests', argv=['', '-v'])
|
<commit_before><commit_msg>Add common tests for query endpoint<commit_after>
|
'''
Biothings Query Component Common Tests
'''
import os
from nose.core import main
from biothings.tests import BiothingsTestCase
class QueryTests(BiothingsTestCase):
''' Test against server specified in environment variable BT_HOST
and BT_API or MyGene.info production server V3 by default '''
__test__ = True
host = os.getenv("BT_HOST", "http://mygene.info")
api = os.getenv("BT_API", "/v3")
def test_01(self):
''' KWARGS CTRL Format Json '''
self.query(q='*', size='1')
def test_02(self):
''' KWARGS CTRL Format Yaml '''
res = self.request('query?q=*&size=1&out_format=yaml').text
assert res.startswith('max_score:')
def test_03(self):
''' KWARGS CTRL Format Html '''
res = self.request('query?q=*&size=1&out_format=html').text
assert '<html>' in res
def test_04(self):
''' KWARGS CTRL Format Msgpack '''
res = self.request('query?q=*&size=1&out_format=msgpack').content
self.msgpack_ok(res)
if __name__ == '__main__':
main(defaultTest='__main__.QueryTests', argv=['', '-v'])
|
Add common tests for query endpoint'''
Biothings Query Component Common Tests
'''
import os
from nose.core import main
from biothings.tests import BiothingsTestCase
class QueryTests(BiothingsTestCase):
''' Test against server specified in environment variable BT_HOST
and BT_API or MyGene.info production server V3 by default '''
__test__ = True
host = os.getenv("BT_HOST", "http://mygene.info")
api = os.getenv("BT_API", "/v3")
def test_01(self):
''' KWARGS CTRL Format Json '''
self.query(q='*', size='1')
def test_02(self):
''' KWARGS CTRL Format Yaml '''
res = self.request('query?q=*&size=1&out_format=yaml').text
assert res.startswith('max_score:')
def test_03(self):
''' KWARGS CTRL Format Html '''
res = self.request('query?q=*&size=1&out_format=html').text
assert '<html>' in res
def test_04(self):
''' KWARGS CTRL Format Msgpack '''
res = self.request('query?q=*&size=1&out_format=msgpack').content
self.msgpack_ok(res)
if __name__ == '__main__':
main(defaultTest='__main__.QueryTests', argv=['', '-v'])
|
<commit_before><commit_msg>Add common tests for query endpoint<commit_after>'''
Biothings Query Component Common Tests
'''
import os
from nose.core import main
from biothings.tests import BiothingsTestCase
class QueryTests(BiothingsTestCase):
''' Test against server specified in environment variable BT_HOST
and BT_API or MyGene.info production server V3 by default '''
__test__ = True
host = os.getenv("BT_HOST", "http://mygene.info")
api = os.getenv("BT_API", "/v3")
def test_01(self):
''' KWARGS CTRL Format Json '''
self.query(q='*', size='1')
def test_02(self):
''' KWARGS CTRL Format Yaml '''
res = self.request('query?q=*&size=1&out_format=yaml').text
assert res.startswith('max_score:')
def test_03(self):
''' KWARGS CTRL Format Html '''
res = self.request('query?q=*&size=1&out_format=html').text
assert '<html>' in res
def test_04(self):
''' KWARGS CTRL Format Msgpack '''
res = self.request('query?q=*&size=1&out_format=msgpack').content
self.msgpack_ok(res)
if __name__ == '__main__':
main(defaultTest='__main__.QueryTests', argv=['', '-v'])
|
|
638e2d7a33f522007d80d39e0a8e5bface654286
|
test/test_scripts/test_utils.py
|
test/test_scripts/test_utils.py
|
from scripts.init import utils
import unittest
class TestUtils(unittest.TestCase):
@classmethod
def setUpClass(self):
pass
def test_get_parameter(self):
api = utils.get_parameter('api')
url = utils.get_parameter('api', 'url')
# Assert parameters are not empty
self.assertGreater(len(api), 0)
self.assertGreater(len(url), 0)
def test_execute_graphql_request(self):
payload = 'query{allDataSourceTypes{nodes{id}}}'
data = utils.execute_graphql_request(payload)
nb_records = len(data['data']['allDataSourceTypes']['nodes'])
# Assert graphql query returned records
self.assertGreater(nb_records, 0)
@classmethod
def tearDownClass(self):
pass
if __name__ == '__main__':
unittest.main()
|
Add unit tests for utility methods.
|
Add unit tests for utility methods.
|
Python
|
apache-2.0
|
alexisrolland/data-quality,alexisrolland/data-quality,alexisrolland/data-quality,alexisrolland/data-quality
|
Add unit tests for utility methods.
|
from scripts.init import utils
import unittest
class TestUtils(unittest.TestCase):
@classmethod
def setUpClass(self):
pass
def test_get_parameter(self):
api = utils.get_parameter('api')
url = utils.get_parameter('api', 'url')
# Assert parameters are not empty
self.assertGreater(len(api), 0)
self.assertGreater(len(url), 0)
def test_execute_graphql_request(self):
payload = 'query{allDataSourceTypes{nodes{id}}}'
data = utils.execute_graphql_request(payload)
nb_records = len(data['data']['allDataSourceTypes']['nodes'])
# Assert graphql query returned records
self.assertGreater(nb_records, 0)
@classmethod
def tearDownClass(self):
pass
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit tests for utility methods.<commit_after>
|
from scripts.init import utils
import unittest
class TestUtils(unittest.TestCase):
@classmethod
def setUpClass(self):
pass
def test_get_parameter(self):
api = utils.get_parameter('api')
url = utils.get_parameter('api', 'url')
# Assert parameters are not empty
self.assertGreater(len(api), 0)
self.assertGreater(len(url), 0)
def test_execute_graphql_request(self):
payload = 'query{allDataSourceTypes{nodes{id}}}'
data = utils.execute_graphql_request(payload)
nb_records = len(data['data']['allDataSourceTypes']['nodes'])
# Assert graphql query returned records
self.assertGreater(nb_records, 0)
@classmethod
def tearDownClass(self):
pass
if __name__ == '__main__':
unittest.main()
|
Add unit tests for utility methods.from scripts.init import utils
import unittest
class TestUtils(unittest.TestCase):
@classmethod
def setUpClass(self):
pass
def test_get_parameter(self):
api = utils.get_parameter('api')
url = utils.get_parameter('api', 'url')
# Assert parameters are not empty
self.assertGreater(len(api), 0)
self.assertGreater(len(url), 0)
def test_execute_graphql_request(self):
payload = 'query{allDataSourceTypes{nodes{id}}}'
data = utils.execute_graphql_request(payload)
nb_records = len(data['data']['allDataSourceTypes']['nodes'])
# Assert graphql query returned records
self.assertGreater(nb_records, 0)
@classmethod
def tearDownClass(self):
pass
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit tests for utility methods.<commit_after>from scripts.init import utils
import unittest
class TestUtils(unittest.TestCase):
@classmethod
def setUpClass(self):
pass
def test_get_parameter(self):
api = utils.get_parameter('api')
url = utils.get_parameter('api', 'url')
# Assert parameters are not empty
self.assertGreater(len(api), 0)
self.assertGreater(len(url), 0)
def test_execute_graphql_request(self):
payload = 'query{allDataSourceTypes{nodes{id}}}'
data = utils.execute_graphql_request(payload)
nb_records = len(data['data']['allDataSourceTypes']['nodes'])
# Assert graphql query returned records
self.assertGreater(nb_records, 0)
@classmethod
def tearDownClass(self):
pass
if __name__ == '__main__':
unittest.main()
|
|
5bab29cbdb1d5db9949a9379656cf1a925fcf20a
|
tests/Settings/TestSettingFunction.py
|
tests/Settings/TestSettingFunction.py
|
# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import pytest
import UM.Settings.SettingFunction
## Individual test cases for the good setting functions.
#
# Each test will be executed with each of these functions. These functions are
# all good and should work.
setting_function_good_data = [
"0", # Number.
"\"x\"", # String.
"foo", # Variable.
"math.sqrt(4)", # Function call.
"foo * zoo" # Two variables.
]
## Fixture to create a setting function.
#
# These setting functions are all built with good functions. Id est no errors
# should occur during the creation of the fixture.
@pytest.fixture(params = setting_function_good_data)
def setting_function_good(request):
return UM.Settings.SettingFunction.SettingFunction(request.param)
## Individual test cases for the bad setting functions.
#
# Each test will be executed with each of these functions. These functions are
# all bad and should not work.
setting_function_bad_data = [
"" # Empty string.
"lambda i: os.open(/etc/passwd).read()", # Function that reads your passwords from your system.
"exec(\"lambda i: o\" + \"s.open(/etc/passwd).read()\")", # Obfuscated function that reads your passwords from your system.
"(" # Syntax error.
]
## Fixture to create a setting function.
#
# These setting functions are all built with bad functions. Id est they should
# give an error when creating the fixture.
@pytest.fixture(params = setting_function_bad_data)
def setting_function_bad(request):
return UM.Settings.SettingFunction.SettingFunction(request.param)
## Tests the initialisation of setting functions with good functions.
#
# Each of these should create a good function.
def test_init_good(setting_function_good):
assert setting_function_good is not None
assert setting_function_good.isValid()
## Tests the initialisation of setting functions with bad functions.
#
# Each of these should create a bad function.
def test_init_bad(setting_function_bad):
assert setting_function_bad is not None
assert not setting_function_bad.isValid()
|
Add test suite for SettingFunction
|
Add test suite for SettingFunction
It currently tests only the initialisation.
Contributes to issue CURA-1278.
|
Python
|
agpl-3.0
|
onitake/Uranium,onitake/Uranium
|
Add test suite for SettingFunction
It currently tests only the initialisation.
Contributes to issue CURA-1278.
|
# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import pytest
import UM.Settings.SettingFunction
## Individual test cases for the good setting functions.
#
# Each test will be executed with each of these functions. These functions are
# all good and should work.
setting_function_good_data = [
"0", # Number.
"\"x\"", # String.
"foo", # Variable.
"math.sqrt(4)", # Function call.
"foo * zoo" # Two variables.
]
## Fixture to create a setting function.
#
# These setting functions are all built with good functions. Id est no errors
# should occur during the creation of the fixture.
@pytest.fixture(params = setting_function_good_data)
def setting_function_good(request):
return UM.Settings.SettingFunction.SettingFunction(request.param)
## Individual test cases for the bad setting functions.
#
# Each test will be executed with each of these functions. These functions are
# all bad and should not work.
setting_function_bad_data = [
"" # Empty string.
"lambda i: os.open(/etc/passwd).read()", # Function that reads your passwords from your system.
"exec(\"lambda i: o\" + \"s.open(/etc/passwd).read()\")", # Obfuscated function that reads your passwords from your system.
"(" # Syntax error.
]
## Fixture to create a setting function.
#
# These setting functions are all built with bad functions. Id est they should
# give an error when creating the fixture.
@pytest.fixture(params = setting_function_bad_data)
def setting_function_bad(request):
return UM.Settings.SettingFunction.SettingFunction(request.param)
## Tests the initialisation of setting functions with good functions.
#
# Each of these should create a good function.
def test_init_good(setting_function_good):
assert setting_function_good is not None
assert setting_function_good.isValid()
## Tests the initialisation of setting functions with bad functions.
#
# Each of these should create a bad function.
def test_init_bad(setting_function_bad):
assert setting_function_bad is not None
assert not setting_function_bad.isValid()
|
<commit_before><commit_msg>Add test suite for SettingFunction
It currently tests only the initialisation.
Contributes to issue CURA-1278.<commit_after>
|
# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import pytest
import UM.Settings.SettingFunction
## Individual test cases for the good setting functions.
#
# Each test will be executed with each of these functions. These functions are
# all good and should work.
setting_function_good_data = [
"0", # Number.
"\"x\"", # String.
"foo", # Variable.
"math.sqrt(4)", # Function call.
"foo * zoo" # Two variables.
]
## Fixture to create a setting function.
#
# These setting functions are all built with good functions. Id est no errors
# should occur during the creation of the fixture.
@pytest.fixture(params = setting_function_good_data)
def setting_function_good(request):
return UM.Settings.SettingFunction.SettingFunction(request.param)
## Individual test cases for the bad setting functions.
#
# Each test will be executed with each of these functions. These functions are
# all bad and should not work.
setting_function_bad_data = [
"" # Empty string.
"lambda i: os.open(/etc/passwd).read()", # Function that reads your passwords from your system.
"exec(\"lambda i: o\" + \"s.open(/etc/passwd).read()\")", # Obfuscated function that reads your passwords from your system.
"(" # Syntax error.
]
## Fixture to create a setting function.
#
# These setting functions are all built with bad functions. Id est they should
# give an error when creating the fixture.
@pytest.fixture(params = setting_function_bad_data)
def setting_function_bad(request):
return UM.Settings.SettingFunction.SettingFunction(request.param)
## Tests the initialisation of setting functions with good functions.
#
# Each of these should create a good function.
def test_init_good(setting_function_good):
assert setting_function_good is not None
assert setting_function_good.isValid()
## Tests the initialisation of setting functions with bad functions.
#
# Each of these should create a bad function.
def test_init_bad(setting_function_bad):
assert setting_function_bad is not None
assert not setting_function_bad.isValid()
|
Add test suite for SettingFunction
It currently tests only the initialisation.
Contributes to issue CURA-1278.# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import pytest
import UM.Settings.SettingFunction
## Individual test cases for the good setting functions.
#
# Each test will be executed with each of these functions. These functions are
# all good and should work.
setting_function_good_data = [
"0", # Number.
"\"x\"", # String.
"foo", # Variable.
"math.sqrt(4)", # Function call.
"foo * zoo" # Two variables.
]
## Fixture to create a setting function.
#
# These setting functions are all built with good functions. Id est no errors
# should occur during the creation of the fixture.
@pytest.fixture(params = setting_function_good_data)
def setting_function_good(request):
return UM.Settings.SettingFunction.SettingFunction(request.param)
## Individual test cases for the bad setting functions.
#
# Each test will be executed with each of these functions. These functions are
# all bad and should not work.
setting_function_bad_data = [
"" # Empty string.
"lambda i: os.open(/etc/passwd).read()", # Function that reads your passwords from your system.
"exec(\"lambda i: o\" + \"s.open(/etc/passwd).read()\")", # Obfuscated function that reads your passwords from your system.
"(" # Syntax error.
]
## Fixture to create a setting function.
#
# These setting functions are all built with bad functions. Id est they should
# give an error when creating the fixture.
@pytest.fixture(params = setting_function_bad_data)
def setting_function_bad(request):
return UM.Settings.SettingFunction.SettingFunction(request.param)
## Tests the initialisation of setting functions with good functions.
#
# Each of these should create a good function.
def test_init_good(setting_function_good):
assert setting_function_good is not None
assert setting_function_good.isValid()
## Tests the initialisation of setting functions with bad functions.
#
# Each of these should create a bad function.
def test_init_bad(setting_function_bad):
assert setting_function_bad is not None
assert not setting_function_bad.isValid()
|
<commit_before><commit_msg>Add test suite for SettingFunction
It currently tests only the initialisation.
Contributes to issue CURA-1278.<commit_after># Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
import pytest
import UM.Settings.SettingFunction
## Individual test cases for the good setting functions.
#
# Each test will be executed with each of these functions. These functions are
# all good and should work.
setting_function_good_data = [
"0", # Number.
"\"x\"", # String.
"foo", # Variable.
"math.sqrt(4)", # Function call.
"foo * zoo" # Two variables.
]
## Fixture to create a setting function.
#
# These setting functions are all built with good functions. Id est no errors
# should occur during the creation of the fixture.
@pytest.fixture(params = setting_function_good_data)
def setting_function_good(request):
return UM.Settings.SettingFunction.SettingFunction(request.param)
## Individual test cases for the bad setting functions.
#
# Each test will be executed with each of these functions. These functions are
# all bad and should not work.
setting_function_bad_data = [
"" # Empty string.
"lambda i: os.open(/etc/passwd).read()", # Function that reads your passwords from your system.
"exec(\"lambda i: o\" + \"s.open(/etc/passwd).read()\")", # Obfuscated function that reads your passwords from your system.
"(" # Syntax error.
]
## Fixture to create a setting function.
#
# These setting functions are all built with bad functions. Id est they should
# give an error when creating the fixture.
@pytest.fixture(params = setting_function_bad_data)
def setting_function_bad(request):
return UM.Settings.SettingFunction.SettingFunction(request.param)
## Tests the initialisation of setting functions with good functions.
#
# Each of these should create a good function.
def test_init_good(setting_function_good):
assert setting_function_good is not None
assert setting_function_good.isValid()
## Tests the initialisation of setting functions with bad functions.
#
# Each of these should create a bad function.
def test_init_bad(setting_function_bad):
assert setting_function_bad is not None
assert not setting_function_bad.isValid()
|
|
5e13e3bc045d496232e5ced6b7dc314f14183257
|
doc/examples/viennagrid_wrapper/io_stats.py
|
doc/examples/viennagrid_wrapper/io_stats.py
|
#!/usr/bin/env python
#
# This example is like the readers and writers example ('io.py'),
# but this one also calculates some statistics on the elapsed time, the
# number of vertices and cells read, etc.
from __future__ import print_function
# In this example, we will set up a domain of triangles in the cartesian 3D
# space from the contents of a Netgen mesh file.
#
# For that purpose, we need to define a domain and, eventually, also a segmentation
# (in case we want to read segmentation data from the mesh file), and we need the
# Netgen reader function, too.
#
# (Notice that the 'read_netgen' function and all other I/O functions
# work with any type of domain and segmentation without name change.)
from viennagrid.wrapper import TriangularCartesian3D_Domain as Domain
from viennagrid.wrapper import TriangularCartesian3D_Segmentation as Segmentation
from viennagrid.wrapper import read_netgen
import time
# In case we want to read only the domain information from the mesh file, we would
# just create an empty domain and call the Netgen reader on it with the file path
# where the mesh file can be found.
domain = Domain()
start_time = time.time()
read_netgen('../data/half-trigate.mesh', domain)
end_time = time.time()
elapsed_time = end_time - start_time
print('Elapsed time: ', elapsed_time, ' seconds (', elapsed_time / 60, ')', sep='')
print('Read domain with', domain.num_vertices, 'vertices')
# In case we want to read not only the domain information, but also the segmentation
# information from the mesh file, we would have to create an empty domain and an
# empty segmentation on that domain, and then call the Netgen reader.
domain = Domain()
segmentation = Segmentation(domain)
start_time = time.time()
read_netgen('../data/half-trigate.mesh', domain, segmentation)
end_time = time.time()
elapsed_time = end_time - start_time
print('Elapsed time: ', elapsed_time, ' seconds (', elapsed_time / 60, ')', sep='')
print('Read domain with', domain.num_vertices, 'vertices')
print('Read segmentation with', segmentation.num_segments, 'segments')
for i, seg in enumerate(segmentation.segments):
print('Segment #', i, ' contains ', seg.num_cells, ' cells', sep='')
|
Create a copy of the Netgen reader example and add some statistics calculation.
|
Create a copy of the Netgen reader example and add some statistics calculation.
These additions calculate the elapsed time (the time that the reader has taken in order to read the mesh file), the number of vertices in the domain and the number of cells in the segmentation (if applicable).
|
Python
|
mit
|
jonancm/viennagrid-python,jonancm/viennagrid-python,jonancm/viennagrid-python
|
Create a copy of the Netgen reader example and add some statistics calculation.
These additions calculate the elapsed time (the time that the reader has taken in order to read the mesh file), the number of vertices in the domain and the number of cells in the segmentation (if applicable).
|
#!/usr/bin/env python
#
# This example is like the readers and writers example ('io.py'),
# but this one also calculates some statistics on the elapsed time, the
# number of vertices and cells read, etc.
from __future__ import print_function
# In this example, we will set up a domain of triangles in the cartesian 3D
# space from the contents of a Netgen mesh file.
#
# For that purpose, we need to define a domain and, eventually, also a segmentation
# (in case we want to read segmentation data from the mesh file), and we need the
# Netgen reader function, too.
#
# (Notice that the 'read_netgen' function and all other I/O functions
# work with any type of domain and segmentation without name change.)
from viennagrid.wrapper import TriangularCartesian3D_Domain as Domain
from viennagrid.wrapper import TriangularCartesian3D_Segmentation as Segmentation
from viennagrid.wrapper import read_netgen
import time
# In case we want to read only the domain information from the mesh file, we would
# just create an empty domain and call the Netgen reader on it with the file path
# where the mesh file can be found.
domain = Domain()
start_time = time.time()
read_netgen('../data/half-trigate.mesh', domain)
end_time = time.time()
elapsed_time = end_time - start_time
print('Elapsed time: ', elapsed_time, ' seconds (', elapsed_time / 60, ')', sep='')
print('Read domain with', domain.num_vertices, 'vertices')
# In case we want to read not only the domain information, but also the segmentation
# information from the mesh file, we would have to create an empty domain and an
# empty segmentation on that domain, and then call the Netgen reader.
domain = Domain()
segmentation = Segmentation(domain)
start_time = time.time()
read_netgen('../data/half-trigate.mesh', domain, segmentation)
end_time = time.time()
elapsed_time = end_time - start_time
print('Elapsed time: ', elapsed_time, ' seconds (', elapsed_time / 60, ')', sep='')
print('Read domain with', domain.num_vertices, 'vertices')
print('Read segmentation with', segmentation.num_segments, 'segments')
for i, seg in enumerate(segmentation.segments):
print('Segment #', i, ' contains ', seg.num_cells, ' cells', sep='')
|
<commit_before><commit_msg>Create a copy of the Netgen reader example and add some statistics calculation.
These additions calculate the elapsed time (the time that the reader has taken in order to read the mesh file), the number of vertices in the domain and the number of cells in the segmentation (if applicable).<commit_after>
|
#!/usr/bin/env python
#
# This example is like the readers and writers example ('io.py'),
# but this one also calculates some statistics on the elapsed time, the
# number of vertices and cells read, etc.
from __future__ import print_function
# In this example, we will set up a domain of triangles in the cartesian 3D
# space from the contents of a Netgen mesh file.
#
# For that purpose, we need to define a domain and, eventually, also a segmentation
# (in case we want to read segmentation data from the mesh file), and we need the
# Netgen reader function, too.
#
# (Notice that the 'read_netgen' function and all other I/O functions
# work with any type of domain and segmentation without name change.)
from viennagrid.wrapper import TriangularCartesian3D_Domain as Domain
from viennagrid.wrapper import TriangularCartesian3D_Segmentation as Segmentation
from viennagrid.wrapper import read_netgen
import time
# In case we want to read only the domain information from the mesh file, we would
# just create an empty domain and call the Netgen reader on it with the file path
# where the mesh file can be found.
domain = Domain()
start_time = time.time()
read_netgen('../data/half-trigate.mesh', domain)
end_time = time.time()
elapsed_time = end_time - start_time
print('Elapsed time: ', elapsed_time, ' seconds (', elapsed_time / 60, ')', sep='')
print('Read domain with', domain.num_vertices, 'vertices')
# In case we want to read not only the domain information, but also the segmentation
# information from the mesh file, we would have to create an empty domain and an
# empty segmentation on that domain, and then call the Netgen reader.
domain = Domain()
segmentation = Segmentation(domain)
start_time = time.time()
read_netgen('../data/half-trigate.mesh', domain, segmentation)
end_time = time.time()
elapsed_time = end_time - start_time
print('Elapsed time: ', elapsed_time, ' seconds (', elapsed_time / 60, ')', sep='')
print('Read domain with', domain.num_vertices, 'vertices')
print('Read segmentation with', segmentation.num_segments, 'segments')
for i, seg in enumerate(segmentation.segments):
print('Segment #', i, ' contains ', seg.num_cells, ' cells', sep='')
|
Create a copy of the Netgen reader example and add some statistics calculation.
These additions calculate the elapsed time (the time that the reader has taken in order to read the mesh file), the number of vertices in the domain and the number of cells in the segmentation (if applicable).#!/usr/bin/env python
#
# This example is like the readers and writers example ('io.py'),
# but this one also calculates some statistics on the elapsed time, the
# number of vertices and cells read, etc.
from __future__ import print_function
# In this example, we will set up a domain of triangles in the cartesian 3D
# space from the contents of a Netgen mesh file.
#
# For that purpose, we need to define a domain and, eventually, also a segmentation
# (in case we want to read segmentation data from the mesh file), and we need the
# Netgen reader function, too.
#
# (Notice that the 'read_netgen' function and all other I/O functions
# work with any type of domain and segmentation without name change.)
from viennagrid.wrapper import TriangularCartesian3D_Domain as Domain
from viennagrid.wrapper import TriangularCartesian3D_Segmentation as Segmentation
from viennagrid.wrapper import read_netgen
import time
# In case we want to read only the domain information from the mesh file, we would
# just create an empty domain and call the Netgen reader on it with the file path
# where the mesh file can be found.
domain = Domain()
start_time = time.time()
read_netgen('../data/half-trigate.mesh', domain)
end_time = time.time()
elapsed_time = end_time - start_time
print('Elapsed time: ', elapsed_time, ' seconds (', elapsed_time / 60, ')', sep='')
print('Read domain with', domain.num_vertices, 'vertices')
# In case we want to read not only the domain information, but also the segmentation
# information from the mesh file, we would have to create an empty domain and an
# empty segmentation on that domain, and then call the Netgen reader.
domain = Domain()
segmentation = Segmentation(domain)
start_time = time.time()
read_netgen('../data/half-trigate.mesh', domain, segmentation)
end_time = time.time()
elapsed_time = end_time - start_time
print('Elapsed time: ', elapsed_time, ' seconds (', elapsed_time / 60, ')', sep='')
print('Read domain with', domain.num_vertices, 'vertices')
print('Read segmentation with', segmentation.num_segments, 'segmets')
for i, seg in enumerate(segmentation.segments):
print('Segment #', i, ' contains ', seg.num_cells, ' cells', sep='')
|
<commit_before><commit_msg>Create a copy of the Netgen reader example and add some statistics calculation.
These additions calculate the elapsed time (the time that the reader has taken in order to read the mesh file), the number of vertices in the domain and the number of cells in the segmentation (if applicable).<commit_after>#!/usr/bin/env python
#
# This example is like the readers and writers example ('io.py'),
# but this one also calculates some statistics on the elapsed time, the
# number of vertices and cells read, etc.
from __future__ import print_function
# In this example, we will set up a domain of triangles in the cartesian 3D
# space from the contents of a Netgen mesh file.
#
# For that purpose, we need to define a domain and, eventually, also a segmentation
# (in case we want to read segmentation data from the mesh file), and we need the
# Netgen reader function, too.
#
# (Notice that the 'read_netgen' function and all other I/O functions
# work with any type of domain and segmentation without name change.)
from viennagrid.wrapper import TriangularCartesian3D_Domain as Domain
from viennagrid.wrapper import TriangularCartesian3D_Segmentation as Segmentation
from viennagrid.wrapper import read_netgen
import time
# In case we want to read only the domain information from the mesh file, we would
# just create an empty domain and call the Netgen reader on it with the file path
# where the mesh file can be found.
domain = Domain()
start_time = time.time()
read_netgen('../data/half-trigate.mesh', domain)
end_time = time.time()
elapsed_time = end_time - start_time
print('Elapsed time: ', elapsed_time, ' seconds (', elapsed_time / 60, ')', sep='')
print('Read domain with', domain.num_vertices, 'vertices')
# In case we want to read not only the domain information, but also the segmentation
# information from the mesh file, we would have to create an empty domain and an
# empty segmentation on that domain, and then call the Netgen reader.
domain = Domain()
segmentation = Segmentation(domain)
start_time = time.time()
read_netgen('../data/half-trigate.mesh', domain, segmentation)
end_time = time.time()
elapsed_time = end_time - start_time
print('Elapsed time: ', elapsed_time, ' seconds (', elapsed_time / 60, ')', sep='')
print('Read domain with', domain.num_vertices, 'vertices')
print('Read segmentation with', segmentation.num_segments, 'segmets')
for i, seg in enumerate(segmentation.segments):
print('Segment #', i, ' contains ', seg.num_cells, ' cells', sep='')
|
|
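The io_stats.py example above times each reader call by hand with start_time/end_time bookkeeping. Below is a minimal sketch of the same idea as a reusable context manager, using only the standard library; the helper name timed and the commented-out reader call are illustrative assumptions, not part of the original example.

# A small timing helper equivalent to the start_time/end_time bookkeeping
# in the example above. Standard library only; `timed` is a hypothetical name.
import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    # Measure wall-clock time of the enclosed block and print it on exit.
    start = time.time()
    try:
        yield
    finally:
        elapsed = time.time() - start
        print('%s took %f seconds (%f minutes)' % (label, elapsed, elapsed / 60.0))

# Hypothetical usage, mirroring the example above:
# with timed('read_netgen'):
#     read_netgen('../data/half-trigate.mesh', domain, segmentation)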
e1bef44be34efd637bc2acdaf71f01b5d77deaec
|
edisgo/flex_opt/storage_integration.py
|
edisgo/flex_opt/storage_integration.py
|
from edisgo.grid.components import Storage, Line
from edisgo.grid.tools import select_cable
import logging
def integrate_storage(network, position, operation):
"""
Integrate storage units in the grid and specify its operational mode
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
position : str
Specify storage location. Available options are
* 'hvmv_substation_busbar'
operation : str
Specify mode of storage operation
"""
if position == 'hvmv_substation_busbar':
storage_at_hvmv_substation(network.mv_grid)
else:
logging.error("{} is not a valid storage positioning mode".format(
position))
raise ValueError("Unknown parameter for storage posisitioning: {} is "
"not a valid storage positioning mode".format(
position))
def storage_at_hvmv_substation(mv_grid, nominal_capacity=1000):
"""
Place 1 MVA battery at HV/MV substation bus bar
As this is currently a dummy implementation the storage operation is as
simple as follows:
* Feedin > 50 % -> charge at full power
* Feedin < 50 % -> discharge at full power
Parameters
----------
mv_grid : :class:`~.grid.grids.MVGrid`
MV grid instance
nominal_capacity : float
Storage's apparent rated power
"""
# define storage instance and define its operational mode
storage_id = len(mv_grid.graph.nodes_by_attribute('storage')) + 1
storage = Storage(operation={'mode': 'fifty-fifty'},
id=storage_id,
nominal_capacity=nominal_capacity)
# add storage itself to graph
mv_grid.graph.add_nodes_from(storage, type='storage')
# add 1m connecting line to hv/mv substation bus bar
line_type, _ = select_cable(mv_grid.network, 'mv', nominal_capacity)
line = [mv_grid.station, storage,
{'line': Line(
id=storage_id,
type=line_type,
kind='cable',
length=1,
grid=mv_grid)
}]
mv_grid.graph.add_edges_from(line, type='line')
|
Add demo case storage integration
|
Add demo case storage integration
|
Python
|
agpl-3.0
|
openego/eDisGo,openego/eDisGo
|
Add demo case storage integration
|
from edisgo.grid.components import Storage, Line
from edisgo.grid.tools import select_cable
import logging
def integrate_storage(network, position, operation):
"""
Integrate storage units in the grid and specify its operational mode
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
position : str
Specify storage location. Available options are
* 'hvmv_substation_busbar'
operation : str
Specify mode of storage operation
"""
if position == 'hvmv_substation_busbar':
storage_at_hvmv_substation(network.mv_grid)
else:
logging.error("{} is not a valid storage positioning mode".format(
position))
raise ValueError("Unknown parameter for storage posisitioning: {} is "
"not a valid storage positioning mode".format(
position))
def storage_at_hvmv_substation(mv_grid, nominal_capacity=1000):
"""
Place 1 MVA battery at HV/MV substation bus bar
As this is currently a dummy implementation the storage operation is as
simple as follows:
* Feedin > 50 % -> charge at full power
* Feedin < 50 % -> discharge at full power
Parameters
----------
mv_grid : :class:`~.grid.grids.MVGrid`
MV grid instance
nominal_capacity : float
Storage's apparent rated power
"""
# define storage instance and define its operational mode
storage_id = len(mv_grid.graph.nodes_by_attribute('storage')) + 1
storage = Storage(operation={'mode': 'fifty-fifty'},
id=storage_id,
nominal_capacity=nominal_capacity)
# add storage itself to graph
mv_grid.graph.add_nodes_from(storage, type='storage')
# add 1m connecting line to hv/mv substation bus bar
line_type, _ = select_cable(mv_grid.network, 'mv', nominal_capacity)
line = [mv_grid.station, storage,
{'line': Line(
id=storage_id,
type=line_type,
kind='cable',
length=1,
grid=mv_grid)
}]
mv_grid.graph.add_edges_from(line, type='line')
|
<commit_before><commit_msg>Add demo case storage integration<commit_after>
|
from edisgo.grid.components import Storage, Line
from edisgo.grid.tools import select_cable
import logging
def integrate_storage(network, position, operation):
"""
Integrate storage units in the grid and specify its operational mode
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
position : str
Specify storage location. Available options are
* 'hvmv_substation_busbar'
operation : str
Specify mode of storage operation
"""
if position == 'hvmv_substation_busbar':
storage_at_hvmv_substation(network.mv_grid)
else:
logging.error("{} is not a valid storage positioning mode".format(
position))
raise ValueError("Unknown parameter for storage posisitioning: {} is "
"not a valid storage positioning mode".format(
position))
def storage_at_hvmv_substation(mv_grid, nominal_capacity=1000):
"""
Place 1 MVA battery at HV/MV substation bus bar
As this is currently a dummy implementation the storage operation is as
simple as follows:
* Feedin > 50 % -> charge at full power
* Feedin < 50 % -> discharge at full power
Parameters
----------
mv_grid : :class:`~.grid.grids.MVGrid`
MV grid instance
nominal_capacity : float
Storage's apparent rated power
"""
# define storage instance and define its operational mode
storage_id = len(mv_grid.graph.nodes_by_attribute('storage')) + 1
storage = Storage(operation={'mode': 'fifty-fifty'},
id=storage_id,
nominal_capacity=nominal_capacity)
# add storage itself to graph
mv_grid.graph.add_nodes_from(storage, type='storage')
# add 1m connecting line to hv/mv substation bus bar
line_type, _ = select_cable(mv_grid.network, 'mv', nominal_capacity)
line = [mv_grid.station, storage,
{'line': Line(
id=storage_id,
type=line_type,
kind='cable',
length=1,
grid=mv_grid)
}]
mv_grid.graph.add_edges_from(line, type='line')
|
Add demo case storage integrationfrom edisgo.grid.components import Storage, Line
from edisgo.grid.tools import select_cable
import logging
def integrate_storage(network, position, operation):
"""
Integrate storage units in the grid and specify its operational mode
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
position : str
Specify storage location. Available options are
* 'hvmv_substation_busbar'
operation : str
Specify mode of storage operation
"""
if position == 'hvmv_substation_busbar':
storage_at_hvmv_substation(network.mv_grid)
else:
logging.error("{} is not a valid storage positioning mode".format(
position))
raise ValueError("Unknown parameter for storage posisitioning: {} is "
"not a valid storage positioning mode".format(
position))
def storage_at_hvmv_substation(mv_grid, nominal_capacity=1000):
"""
Place 1 MVA battery at HV/MV substation bus bar
As this is currently a dummy implementation the storage operation is as
simple as follows:
* Feedin > 50 % -> charge at full power
* Feedin < 50 % -> discharge at full power
Parameters
----------
mv_grid : :class:`~.grid.grids.MVGrid`
MV grid instance
nominal_capacity : float
Storage's apparent rated power
"""
# define storage instance and define its operational mode
storage_id = len(mv_grid.graph.nodes_by_attribute('storage')) + 1
storage = Storage(operation={'mode': 'fifty-fifty'},
id=storage_id,
nominal_capacity=nominal_capacity)
# add storage itself to graph
mv_grid.graph.add_nodes_from(storage, type='storage')
# add 1m connecting line to hv/mv substation bus bar
line_type, _ = select_cable(mv_grid.network, 'mv', nominal_capacity)
line = [mv_grid.station, storage,
{'line': Line(
id=storage_id,
type=line_type,
kind='cable',
length=1,
grid=mv_grid)
}]
mv_grid.graph.add_edges_from(line, type='line')
|
<commit_before><commit_msg>Add demo case storage integration<commit_after>from edisgo.grid.components import Storage, Line
from edisgo.grid.tools import select_cable
import logging
def integrate_storage(network, position, operation):
"""
Integrate storage units in the grid and specify its operational mode
Parameters
----------
network: :class:`~.grid.network.Network`
The eDisGo container object
position : str
Specify storage location. Available options are
* 'hvmv_substation_busbar'
operation : str
Specify mode of storage operation
"""
if position == 'hvmv_substation_busbar':
storage_at_hvmv_substation(network.mv_grid)
else:
logging.error("{} is not a valid storage positioning mode".format(
position))
raise ValueError("Unknown parameter for storage posisitioning: {} is "
"not a valid storage positioning mode".format(
position))
def storage_at_hvmv_substation(mv_grid, nominal_capacity=1000):
"""
Place 1 MVA battery at HV/MV substation bus bar
As this is currently a dummy implementation the storage operation is as
simple as follows:
* Feedin > 50 % -> charge at full power
* Feedin < 50 % -> discharge at full power
Parameters
----------
mv_grid : :class:`~.grid.grids.MVGrid`
MV grid instance
nominal_capacity : float
Storage's apparent rated power
"""
# define storage instance and define its operational mode
storage_id = len(mv_grid.graph.nodes_by_attribute('storage')) + 1
storage = Storage(operation={'mode': 'fifty-fifty'},
id=storage_id,
nominal_capacity=nominal_capacity)
# add storage itself to graph
mv_grid.graph.add_nodes_from(storage, type='storage')
# add 1m connecting line to hv/mv substation bus bar
line_type, _ = select_cable(mv_grid.network, 'mv', nominal_capacity)
line = [mv_grid.station, storage,
{'line': Line(
id=storage_id,
type=line_type,
kind='cable',
length=1,
grid=mv_grid)
}]
mv_grid.graph.add_edges_from(line, type='line')
|
|
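The storage_at_hvmv_substation docstring above describes the 'fifty-fifty' operation only verbally (feed-in above 50 % charges at full power, otherwise discharge at full power). A minimal sketch of that dispatch rule in plain Python follows; the function name, its inputs and the sign convention are assumptions for illustration and do not reflect how eDisGo itself implements the mode.

# Illustrative sketch of the 'fifty-fifty' rule described in the docstring above.
# `fifty_fifty_dispatch`, its arguments and the sign convention are hypothetical.
def fifty_fifty_dispatch(feedin_fraction, nominal_power):
    # feedin_fraction: current feed-in as a fraction of nominal generation (0..1)
    # nominal_power:   storage rated power, e.g. 1000 kVA as in the example
    # Returns the set-point: positive = charge, negative = discharge.
    if feedin_fraction > 0.5:
        return nominal_power   # feed-in above 50 %: charge at full power
    return -nominal_power      # feed-in at or below 50 %: discharge at full power

# Feed-in at 70 % of nominal -> charge with the full 1000 kVA
assert fifty_fifty_dispatch(0.7, 1000) == 1000
assert fifty_fifty_dispatch(0.3, 1000) == -1000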
ff9cbbac188f78ed33cb2f650a32777713911384
|
examples/python/monochrome_pipeline.py
|
examples/python/monochrome_pipeline.py
|
import gst
import gobject
import os
VIDEODEVICE = "/dev/video1"
WIDTH = 1280
HEIGHT = 960
FRAMERATE = "15/1"
try:
import psutil
except ImportError:
psutil = None
def show_resources_cb (*args):
process = psutil.Process(os.getpid())
if getattr(process, "memory_info"):
print ("Resource usage: %dkB" % (int(process.memory_info()[0]) / 1024))
elif getattr (process, "get_memory_info"):
print ("Resource usage: %dkB" % (int(process.memory_info()[0]) / 1024))
else:
print ("Unsupported psutil module version")
return True
def bus_watch(bus, message):
if message.type == gst.MESSAGE_ERROR:
print ("Got error message: ", message)
return True
loop = gobject.MainLoop()
source = gst.element_factory_make ("v4l2src")
source.set_property("device", VIDEODEVICE)
flt1 = gst.element_factory_make ("capsfilter")
flt1.set_property("caps", gst.Caps("video/x-raw-gray,width=%d,height=%d,framerate=(fraction)%s" % (WIDTH, HEIGHT, FRAMERATE)))
autoexp = gst.element_factory_make ("tis_auto_exposure")
autoexp.set_property("auto-exposure", True)
bufferfilter = gst.element_factory_make ("tisvideobufferfilter")
csp = gst.element_factory_make ("ffmpegcolorspace")
scale = gst.element_factory_make ("videoscale")
flt2 = gst.element_factory_make ("capsfilter")
flt2.set_property("caps", gst.Caps("video/x-raw-yuv,width=640,height=480"))
sink = gst.element_factory_make ("xvimagesink")
pipeline = gst.Pipeline()
pipeline.get_bus().add_watch(bus_watch)
pipeline.add_many (source, flt1, autoexp, bufferfilter, csp, scale, flt2, sink)
source.link(flt1)
flt1.link(autoexp)
autoexp.link(bufferfilter)
bufferfilter.link(csp)
csp.link(scale)
scale.link(flt2)
flt2.link(sink)
print ("Starting Pipeline")
pipeline.set_state(gst.STATE_PLAYING)
if psutil:
gobject.timeout_add_seconds (1,show_resources_cb)
else:
print ("Install psutil package to get resource usage information")
loop.run()
|
Add small demo for a monochrome GST pipeline
|
Add small demo for a monochrome GST pipeline
|
Python
|
apache-2.0
|
TheImagingSource/tiscamera,TheImagingSource/tiscamera,TheImagingSource/tiscamera,TheImagingSource/tiscamera
|
Add small demo for a monochrome GST pipeline
|
import gst
import gobject
import os
VIDEODEVICE = "/dev/video1"
WIDTH = 1280
HEIGHT = 960
FRAMERATE = "15/1"
try:
import psutil
except ImportError:
psutil = None
def show_resources_cb (*args):
process = psutil.Process(os.getpid())
if getattr(process, "memory_info"):
print ("Resource usage: %dkB" % (int(process.memory_info()[0]) / 1024))
elif getattr (process, "get_memory_info"):
print ("Resource usage: %dkB" % (int(process.memory_info()[0]) / 1024))
else:
print ("Unsupported psutil module version")
return True
def bus_watch(bus, message):
if message.type == gst.MESSAGE_ERROR:
print ("Got error message: ", message)
return True
loop = gobject.MainLoop()
source = gst.element_factory_make ("v4l2src")
source.set_property("device", VIDEODEVICE)
flt1 = gst.element_factory_make ("capsfilter")
flt1.set_property("caps", gst.Caps("video/x-raw-gray,width=%d,height=%d,framerate=(fraction)%s" % (WIDTH, HEIGHT, FRAMERATE)))
autoexp = gst.element_factory_make ("tis_auto_exposure")
autoexp.set_property("auto-exposure", True)
bufferfilter = gst.element_factory_make ("tisvideobufferfilter")
csp = gst.element_factory_make ("ffmpegcolorspace")
scale = gst.element_factory_make ("videoscale")
flt2 = gst.element_factory_make ("capsfilter")
flt2.set_property("caps", gst.Caps("video/x-raw-yuv,width=640,height=480"))
sink = gst.element_factory_make ("xvimagesink")
pipeline = gst.Pipeline()
pipeline.get_bus().add_watch(bus_watch)
pipeline.add_many (source, flt1, autoexp, bufferfilter, csp, scale, flt2, sink)
source.link(flt1)
flt1.link(autoexp)
autoexp.link(bufferfilter)
bufferfilter.link(csp)
csp.link(scale)
scale.link(flt2)
flt2.link(sink)
print ("Starting Pipeline")
pipeline.set_state(gst.STATE_PLAYING)
if psutil:
gobject.timeout_add_seconds (1,show_resources_cb)
else:
print ("Install psutil package to get resource usage information")
loop.run()
|
<commit_before><commit_msg>Add small demo for a monochrome GST pipeline<commit_after>
|
import gst
import gobject
import os
VIDEODEVICE = "/dev/video1"
WIDTH = 1280
HEIGHT = 960
FRAMERATE = "15/1"
try:
import psutil
except ImportError:
psutil = None
def show_resources_cb (*args):
process = psutil.Process(os.getpid())
if getattr(process, "memory_info"):
print ("Resource usage: %dkB" % (int(process.memory_info()[0]) / 1024))
elif getattr (process, "get_memory_info"):
print ("Resource usage: %dkB" % (int(process.memory_info()[0]) / 1024))
else:
print ("Unsupported psutil module version")
return True
def bus_watch(bus, message):
if message.type == gst.MESSAGE_ERROR:
print ("Got error message: ", message)
return True
loop = gobject.MainLoop()
source = gst.element_factory_make ("v4l2src")
source.set_property("device", VIDEODEVICE)
flt1 = gst.element_factory_make ("capsfilter")
flt1.set_property("caps", gst.Caps("video/x-raw-gray,width=%d,height=%d,framerate=(fraction)%s" % (WIDTH, HEIGHT, FRAMERATE)))
autoexp = gst.element_factory_make ("tis_auto_exposure")
autoexp.set_property("auto-exposure", True)
bufferfilter = gst.element_factory_make ("tisvideobufferfilter")
csp = gst.element_factory_make ("ffmpegcolorspace")
scale = gst.element_factory_make ("videoscale")
flt2 = gst.element_factory_make ("capsfilter")
flt2.set_property("caps", gst.Caps("video/x-raw-yuv,width=640,height=480"))
sink = gst.element_factory_make ("xvimagesink")
pipeline = gst.Pipeline()
pipeline.get_bus().add_watch(bus_watch)
pipeline.add_many (source, flt1, autoexp, bufferfilter, csp, scale, flt2, sink)
source.link(flt1)
flt1.link(autoexp)
autoexp.link(bufferfilter)
bufferfilter.link(csp)
csp.link(scale)
scale.link(flt2)
flt2.link(sink)
print ("Starting Pipeline")
pipeline.set_state(gst.STATE_PLAYING)
if psutil:
gobject.timeout_add_seconds (1,show_resources_cb)
else:
print ("Install psutil package to get resource usage information")
loop.run()
|
Add small demo for a monochrome GST pipelineimport gst
import gobject
import os
VIDEODEVICE = "/dev/video1"
WIDTH = 1280
HEIGHT = 960
FRAMERATE = "15/1"
try:
import psutil
except ImportError:
psutil = None
def show_resources_cb (*args):
process = psutil.Process(os.getpid())
if getattr(process, "memory_info"):
print ("Resource usage: %dkB" % (int(process.memory_info()[0]) / 1024))
elif getattr (process, "get_memory_info"):
print ("Resource usage: %dkB" % (int(process.memory_info()[0]) / 1024))
else:
print ("Unsupported psutil module version")
return True
def bus_watch(bus, message):
if message.type == gst.MESSAGE_ERROR:
print ("Got error message: ", message)
return True
loop = gobject.MainLoop()
source = gst.element_factory_make ("v4l2src")
source.set_property("device", VIDEODEVICE)
flt1 = gst.element_factory_make ("capsfilter")
flt1.set_property("caps", gst.Caps("video/x-raw-gray,width=%d,height=%d,framerate=(fraction)%s" % (WIDTH, HEIGHT, FRAMERATE)))
autoexp = gst.element_factory_make ("tis_auto_exposure")
autoexp.set_property("auto-exposure", True)
bufferfilter = gst.element_factory_make ("tisvideobufferfilter")
csp = gst.element_factory_make ("ffmpegcolorspace")
scale = gst.element_factory_make ("videoscale")
flt2 = gst.element_factory_make ("capsfilter")
flt2.set_property("caps", gst.Caps("video/x-raw-yuv,width=640,height=480"))
sink = gst.element_factory_make ("xvimagesink")
pipeline = gst.Pipeline()
pipeline.get_bus().add_watch(bus_watch)
pipeline.add_many (source, flt1, autoexp, bufferfilter, csp, scale, flt2, sink)
source.link(flt1)
flt1.link(autoexp)
autoexp.link(bufferfilter)
bufferfilter.link(csp)
csp.link(scale)
scale.link(flt2)
flt2.link(sink)
print ("Starting Pipeline")
pipeline.set_state(gst.STATE_PLAYING)
if psutil:
gobject.timeout_add_seconds (1,show_resources_cb)
else:
print ("Install psutil package to get resource usage information")
loop.run()
|
<commit_before><commit_msg>Add small demo for a monochrome GST pipeline<commit_after>import gst
import gobject
import os
VIDEODEVICE = "/dev/video1"
WIDTH = 1280
HEIGHT = 960
FRAMERATE = "15/1"
try:
import psutil
except ImportError:
psutil = None
def show_resources_cb (*args):
process = psutil.Process(os.getpid())
if getattr(process, "memory_info"):
print ("Resource usage: %dkB" % (int(process.memory_info()[0]) / 1024))
elif getattr (process, "get_memory_info"):
print ("Resource usage: %dkB" % (int(process.memory_info()[0]) / 1024))
else:
print ("Unsupported psutil module version")
return True
def bus_watch(bus, message):
if message.type == gst.MESSAGE_ERROR:
print ("Got error message: ", message)
return True
loop = gobject.MainLoop()
source = gst.element_factory_make ("v4l2src")
source.set_property("device", VIDEODEVICE)
flt1 = gst.element_factory_make ("capsfilter")
flt1.set_property("caps", gst.Caps("video/x-raw-gray,width=%d,height=%d,framerate=(fraction)%s" % (WIDTH, HEIGHT, FRAMERATE)))
autoexp = gst.element_factory_make ("tis_auto_exposure")
autoexp.set_property("auto-exposure", True)
bufferfilter = gst.element_factory_make ("tisvideobufferfilter")
csp = gst.element_factory_make ("ffmpegcolorspace")
scale = gst.element_factory_make ("videoscale")
flt2 = gst.element_factory_make ("capsfilter")
flt2.set_property("caps", gst.Caps("video/x-raw-yuv,width=640,height=480"))
sink = gst.element_factory_make ("xvimagesink")
pipeline = gst.Pipeline()
pipeline.get_bus().add_watch(bus_watch)
pipeline.add_many (source, flt1, autoexp, bufferfilter, csp, scale, flt2, sink)
source.link(flt1)
flt1.link(autoexp)
autoexp.link(bufferfilter)
bufferfilter.link(csp)
csp.link(scale)
scale.link(flt2)
flt2.link(sink)
print ("Starting Pipeline")
pipeline.set_state(gst.STATE_PLAYING)
if psutil:
gobject.timeout_add_seconds (1,show_resources_cb)
else:
print ("Install psutil package to get resource usage information")
loop.run()
|
|
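The monochrome_pipeline.py record above branches on whether psutil exposes memory_info or the older get_memory_info when reporting resource usage. A short sketch of that version check as a standalone helper is shown below; the function name rss_kb is an assumption, and the sketch relies only on the documented psutil attribute names.

# Illustrative helper for the psutil version check used above: psutil >= 2.0
# exposes Process.memory_info(), older releases used Process.get_memory_info().
# `rss_kb` is a hypothetical name; returns resident set size in kB, or None.
import os

def rss_kb():
    try:
        import psutil
    except ImportError:
        return None
    process = psutil.Process(os.getpid())
    getter = getattr(process, "memory_info", None) or getattr(process, "get_memory_info", None)
    if getter is None:
        return None
    return int(getter()[0]) // 1024

if __name__ == "__main__":
    print("Resource usage: %skB" % rss_kb())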
08a6dddb866ec53ff45a302d7c163d041bbefe71
|
protoplot-test/test_options_resolving.py
|
protoplot-test/test_options_resolving.py
|
import unittest
from protoplot.engine.item import Item
from protoplot.engine.item_container import ItemContainer
class Series(Item):
pass
Series.options.register("color", True)
Series.options.register("lineWidth", False)
Series.options.register("lineStyle", False)
class TestOptionsResolving(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testOptionsResolving(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add stub unit test for options resolving
|
Add stub unit test for options resolving
|
Python
|
agpl-3.0
|
deffi/protoplot
|
Add stub unit test for options resolving
|
import unittest
from protoplot.engine.item import Item
from protoplot.engine.item_container import ItemContainer
class Series(Item):
pass
Series.options.register("color", True)
Series.options.register("lineWidth", False)
Series.options.register("lineStyle", False)
class TestOptionsResolving(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testOptionsResolving(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add stub unit test for options resolving<commit_after>
|
import unittest
from protoplot.engine.item import Item
from protoplot.engine.item_container import ItemContainer
class Series(Item):
pass
Series.options.register("color", True)
Series.options.register("lineWidth", False)
Series.options.register("lineStyle", False)
class TestOptionsResolving(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testOptionsResolving(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add stub unit test for options resolvingimport unittest
from protoplot.engine.item import Item
from protoplot.engine.item_container import ItemContainer
class Series(Item):
pass
Series.options.register("color", True)
Series.options.register("lineWidth", False)
Series.options.register("lineStyle", False)
class TestOptionsResolving(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testOptionsResolving(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add stub unit test for options resolving<commit_after>import unittest
from protoplot.engine.item import Item
from protoplot.engine.item_container import ItemContainer
class Series(Item):
pass
Series.options.register("color", True)
Series.options.register("lineWidth", False)
Series.options.register("lineStyle", False)
class TestOptionsResolving(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testOptionsResolving(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
|
585c9b6f1c8bf186fee34303ba29b7b511c1ba7e
|
mzalendo/core/management/commands/core_match_places_to_mapit_areas_2013.py
|
mzalendo/core/management/commands/core_match_places_to_mapit_areas_2013.py
|
import sys
from optparse import make_option
from pprint import pprint
from django.core.management.base import NoArgsCommand
from django.template.defaultfilters import slugify
from django.conf import settings
# from helpers import geocode
from core import models
from mapit import models as mapit_models
class Command(NoArgsCommand):
help = 'Link places to areas in mapit for the new 2013 places'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
self.match_for_types(type_code='con',
mapit_generation=3,
place_kind_slug='constituency',
session_slug='na2013',
commit=options['commit'])
self.match_for_types(type_code='dis',
mapit_generation=3,
place_kind_slug='county',
session_slug='s2013',
commit=options['commit'],
suffix=True)
def match_for_types(self, type_code, mapit_generation, place_kind_slug, session_slug, commit, suffix=False):
# Get these even if not used so that we know that they exist
area_type = mapit_models.Type.objects.get( code = type_code )
generation = mapit_models.Generation.objects.get( pk = mapit_generation )
place_kind = models.PlaceKind.objects.get( slug = place_kind_slug )
session = models.ParliamentarySession.objects.get(slug = session_slug)
# Find all relevant areas to match
areas = mapit_models.Area.objects.filter(type=area_type,
generation_low__lte=generation,
generation_high__gte=generation)
all_places = set(models.Place.objects.filter(kind=place_kind, parliamentary_session=session))
for area in areas:
# Use the slug for matching, easiest way to normalize
slug = slugify( area.name )
if suffix:
slug += '-' + place_kind.slug
else:
slug += '-2013'
# find it and update, or print out an error for a human to follow up
try:
place = models.Place.objects.get(slug=slug,
kind=place_kind,
parliamentary_session=session)
place.mapit_area = area
if commit:
print >> sys.stderr, "Saving", place
place.save()
else:
print >> sys.stderr, "Not saving %s, since --commit wasn't specified" % (place,)
all_places.discard(place)
except models.Place.DoesNotExist:
print "Could not find matching place for mapit area '%s' (%s, %s)" % ( area.name, slug, place_kind_slug )
if all_places:
for place in all_places:
print "Could not find the place %s in MapIt (%s)" % (place, slugify(place.name))
|
Add a script to add mapit area IDs to new Place objects for 2013
|
Add a script to add mapit area IDs to new Place objects for 2013
This is a variant of the existing core_match_places_to_mapit_areas
command by Edmund von der Burg, but which is also aware of
ParliamentarySessions and has some extra debugging output.
|
Python
|
agpl-3.0
|
geoffkilpin/pombola,patricmutwiri/pombola,hzj123/56th,hzj123/56th,hzj123/56th,ken-muturi/pombola,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,patricmutwiri/pombola,hzj123/56th,mysociety/pombola,hzj123/56th,ken-muturi/pombola,geoffkilpin/pombola,patricmutwiri/pombola,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,mysociety/pombola,geoffkilpin/pombola,patricmutwiri/pombola,hzj123/56th,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,geoffkilpin/pombola,mysociety/pombola
|
Add a script to add mapit area IDs to new Place objects for 2013
This is a variant of the existing core_match_places_to_mapit_areas
command by Edmund von der Burg, but which is also aware of
ParliamentarySessions and has some extra debugging output.
|
import sys
from optparse import make_option
from pprint import pprint
from django.core.management.base import NoArgsCommand
from django.template.defaultfilters import slugify
from django.conf import settings
# from helpers import geocode
from core import models
from mapit import models as mapit_models
class Command(NoArgsCommand):
help = 'Link places to areas in mapit for the new 2013 places'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
self.match_for_types(type_code='con',
mapit_generation=3,
place_kind_slug='constituency',
session_slug='na2013',
commit=options['commit'])
self.match_for_types(type_code='dis',
mapit_generation=3,
place_kind_slug='county',
session_slug='s2013',
commit=options['commit'],
suffix=True)
def match_for_types(self, type_code, mapit_generation, place_kind_slug, session_slug, commit, suffix=False):
# Get these even if not used so that we know that they exist
area_type = mapit_models.Type.objects.get( code = type_code )
generation = mapit_models.Generation.objects.get( pk = mapit_generation )
place_kind = models.PlaceKind.objects.get( slug = place_kind_slug )
session = models.ParliamentarySession.objects.get(slug = session_slug)
# Find all relevant areas to match
areas = mapit_models.Area.objects.filter(type=area_type,
generation_low__lte=generation,
generation_high__gte=generation)
all_places = set(models.Place.objects.filter(kind=place_kind, parliamentary_session=session))
for area in areas:
# Use the slug for matching, easiest way to normalize
slug = slugify( area.name )
if suffix:
slug += '-' + place_kind.slug
else:
slug += '-2013'
# find it and update, or print out an error for a human to follow up
try:
place = models.Place.objects.get(slug=slug,
kind=place_kind,
parliamentary_session=session)
place.mapit_area = area
if commit:
print >> sys.stderr, "Saving", place
place.save()
else:
print >> sys.stderr, "Not saving %s, since --commit wasn't specified" % (place,)
all_places.discard(place)
except models.Place.DoesNotExist:
print "Could not find matching place for mapit area '%s' (%s, %s)" % ( area.name, slug, place_kind_slug )
if all_places:
for place in all_places:
print "Could not find the place %s in MapIt (%s)" % (place, slugify(place.name))
|
<commit_before><commit_msg>Add a script to add mapit area IDs to new Place objects for 2013
This is a variant of the existing core_match_places_to_mapit_areas
command by Edmund von der Burg, but which is also aware of
ParliamentarySessions and has some extra debugging output.<commit_after>
|
import sys
from optparse import make_option
from pprint import pprint
from django.core.management.base import NoArgsCommand
from django.template.defaultfilters import slugify
from django.conf import settings
# from helpers import geocode
from core import models
from mapit import models as mapit_models
class Command(NoArgsCommand):
help = 'Link places to areas in mapit for the new 2013 places'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
self.match_for_types(type_code='con',
mapit_generation=3,
place_kind_slug='constituency',
session_slug='na2013',
commit=options['commit'])
self.match_for_types(type_code='dis',
mapit_generation=3,
place_kind_slug='county',
session_slug='s2013',
commit=options['commit'],
suffix=True)
def match_for_types(self, type_code, mapit_generation, place_kind_slug, session_slug, commit, suffix=False):
# Get these even if not used so that we know that they exist
area_type = mapit_models.Type.objects.get( code = type_code )
generation = mapit_models.Generation.objects.get( pk = mapit_generation )
place_kind = models.PlaceKind.objects.get( slug = place_kind_slug )
session = models.ParliamentarySession.objects.get(slug = session_slug)
# Find all relevant areas to match
areas = mapit_models.Area.objects.filter(type=area_type,
generation_low__lte=generation,
generation_high__gte=generation)
all_places = set(models.Place.objects.filter(kind=place_kind, parliamentary_session=session))
for area in areas:
# Use the slug for matching, easiest way to normalize
slug = slugify( area.name )
if suffix:
slug += '-' + place_kind.slug
else:
slug += '-2013'
# find it and update, or print out an error for a human to follow up
try:
place = models.Place.objects.get(slug=slug,
kind=place_kind,
parliamentary_session=session)
place.mapit_area = area
if commit:
print >> sys.stderr, "Saving", place
place.save()
else:
print >> sys.stderr, "Not saving %s, since --commit wasn't specified" % (place,)
all_places.discard(place)
except models.Place.DoesNotExist:
print "Could not find matching place for mapit area '%s' (%s, %s)" % ( area.name, slug, place_kind_slug )
if all_places:
for place in all_places:
print "Could not find the place %s in MapIt (%s)" % (place, slugify(place.name))
|
Add a script to add mapit area IDs to new Place objects for 2013
This is a variant of the existing core_match_places_to_mapit_areas
command by Edmund von der Burg, but which is also aware of
ParliamentarySessions and has some extra debugging output.import sys
from optparse import make_option
from pprint import pprint
from django.core.management.base import NoArgsCommand
from django.template.defaultfilters import slugify
from django.conf import settings
# from helpers import geocode
from core import models
from mapit import models as mapit_models
class Command(NoArgsCommand):
help = 'Link places to areas in mapit for the new 2013 places'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
self.match_for_types(type_code='con',
mapit_generation=3,
place_kind_slug='constituency',
session_slug='na2013',
commit=options['commit'])
self.match_for_types(type_code='dis',
mapit_generation=3,
place_kind_slug='county',
session_slug='s2013',
commit=options['commit'],
suffix=True)
def match_for_types(self, type_code, mapit_generation, place_kind_slug, session_slug, commit, suffix=False):
# Get these even if not used so that we know that they exist
area_type = mapit_models.Type.objects.get( code = type_code )
generation = mapit_models.Generation.objects.get( pk = mapit_generation )
place_kind = models.PlaceKind.objects.get( slug = place_kind_slug )
session = models.ParliamentarySession.objects.get(slug = session_slug)
# Find all relevant areas to match
areas = mapit_models.Area.objects.filter(type=area_type,
generation_low__lte=generation,
generation_high__gte=generation)
all_places = set(models.Place.objects.filter(kind=place_kind, parliamentary_session=session))
for area in areas:
# Use the slug for matching, easiest way to normalize
slug = slugify( area.name )
if suffix:
slug += '-' + place_kind.slug
else:
slug += '-2013'
# find it and update, or print out an error for a human to follow up
try:
place = models.Place.objects.get(slug=slug,
kind=place_kind,
parliamentary_session=session)
place.mapit_area = area
if commit:
print >> sys.stderr, "Saving", place
place.save()
else:
print >> sys.stderr, "Not saving %s, since --commit wasn't specified" % (place,)
all_places.discard(place)
except models.Place.DoesNotExist:
print "Could not find matching place for mapit area '%s' (%s, %s)" % ( area.name, slug, place_kind_slug )
if all_places:
for place in all_places:
print "Could not find the place %s in MapIt (%s)" % (place, slugify(place.name))
|
<commit_before><commit_msg>Add a script to add mapit area IDs to new Place objects for 2013
This is a variant of the existing core_match_places_to_mapit_areas
command by Edmund von der Burg, but which is also aware of
ParliamentarySessions and has some extra debugging output.<commit_after>import sys
from optparse import make_option
from pprint import pprint
from django.core.management.base import NoArgsCommand
from django.template.defaultfilters import slugify
from django.conf import settings
# from helpers import geocode
from core import models
from mapit import models as mapit_models
class Command(NoArgsCommand):
help = 'Link places to areas in mapit for the new 2013 places'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
self.match_for_types(type_code='con',
mapit_generation=3,
place_kind_slug='constituency',
session_slug='na2013',
commit=options['commit'])
self.match_for_types(type_code='dis',
mapit_generation=3,
place_kind_slug='county',
session_slug='s2013',
commit=options['commit'],
suffix=True)
def match_for_types(self, type_code, mapit_generation, place_kind_slug, session_slug, commit, suffix=False):
# Get these even if not used so that we know that they exist
area_type = mapit_models.Type.objects.get( code = type_code )
generation = mapit_models.Generation.objects.get( pk = mapit_generation )
place_kind = models.PlaceKind.objects.get( slug = place_kind_slug )
session = models.ParliamentarySession.objects.get(slug = session_slug)
# Find all relevant areas to match
areas = mapit_models.Area.objects.filter(type=area_type,
generation_low__lte=generation,
generation_high__gte=generation)
all_places = set(models.Place.objects.filter(kind=place_kind, parliamentary_session=session))
for area in areas:
# Use the slug for matching, easiest way to normalize
slug = slugify( area.name )
if suffix:
slug += '-' + place_kind.slug
else:
slug += '-2013'
# find it and update, or print out an error for a human to follow up
try:
place = models.Place.objects.get(slug=slug,
kind=place_kind,
parliamentary_session=session)
place.mapit_area = area
if commit:
print >> sys.stderr, "Saving", place
place.save()
else:
print >> sys.stderr, "Not saving %s, since --commit wasn't specified" % (place,)
all_places.discard(place)
except models.Place.DoesNotExist:
print "Could not find matching place for mapit area '%s' (%s, %s)" % ( area.name, slug, place_kind_slug )
if all_places:
for place in all_places:
print "Could not find the place %s in MapIt (%s)" % (place, slugify(place.name))
|
|
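Editorial note on the record above: the matching hinges on one detail, namely that the lookup key is the slugified MapIt area name plus either the place-kind slug or a literal "-2013" suffix. A tiny standalone sketch of that key construction follows; slugify_ish only approximates Django's slugify, and the area names are invented for illustration.

# Sketch of the slug-based lookup key used by the command above.
# slugify_ish approximates Django's slugify; the example names are made up.
import re

def slugify_ish(name):
    name = name.strip().lower()
    name = re.sub(r"[^\w\s-]", "", name)      # drop punctuation
    return re.sub(r"[\s_-]+", "-", name).strip("-")

def place_slug(area_name, place_kind_slug, suffix):
    slug = slugify_ish(area_name)
    return slug + ("-" + place_kind_slug if suffix else "-2013")

print(place_slug("Nairobi West", "constituency", suffix=False))  # nairobi-west-2013
print(place_slug("Turkana", "county", suffix=True))               # turkana-county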
5fba93a26b6f09c20391ec18b281def2bd851650
|
tests/basics/for3.py
|
tests/basics/for3.py
|
# test assigning to iterator within the loop
for i in range(2):
print(i)
i = 2
# test assigning to range parameter within the loop
# (since we optimise for loops, this needs checking, currently it fails)
#n = 2
#for i in range(n):
# print(i)
# n = 0
|
Add test for semantics of for-loop that optimisation can break.
|
tests: Add test for semantics of for-loop that optimisation can break.
|
Python
|
mit
|
SungEun-Steve-Kim/test-mp,Vogtinator/micropython,turbinenreiter/micropython,HenrikSolver/micropython,drrk/micropython,alex-march/micropython,pozetroninc/micropython,blmorris/micropython,dmazzella/micropython,kerneltask/micropython,Timmenem/micropython,adamkh/micropython,adamkh/micropython,heisewangluo/micropython,infinnovation/micropython,SHA2017-badge/micropython-esp32,rubencabrera/micropython,dxxb/micropython,dinau/micropython,praemdonck/micropython,slzatz/micropython,firstval/micropython,toolmacher/micropython,slzatz/micropython,martinribelotta/micropython,ericsnowcurrently/micropython,galenhz/micropython,xyb/micropython,danicampora/micropython,adafruit/micropython,lbattraw/micropython,swegener/micropython,blmorris/micropython,praemdonck/micropython,heisewangluo/micropython,ernesto-g/micropython,SHA2017-badge/micropython-esp32,blazewicz/micropython,torwag/micropython,ahotam/micropython,ChuckM/micropython,jimkmc/micropython,bvernoux/micropython,mianos/micropython,ruffy91/micropython,alex-robbins/micropython,skybird6672/micropython,alex-march/micropython,warner83/micropython,ahotam/micropython,mgyenik/micropython,tuc-osg/micropython,jlillest/micropython,swegener/micropython,orionrobots/micropython,xyb/micropython,neilh10/micropython,redbear/micropython,ganshun666/micropython,xuxiaoxin/micropython,chrisdearman/micropython,tobbad/micropython,hosaka/micropython,misterdanb/micropython,adafruit/circuitpython,ChuckM/micropython,toolmacher/micropython,tdautc19841202/micropython,PappaPeppar/micropython,pozetroninc/micropython,vitiral/micropython,kostyll/micropython,suda/micropython,lowRISC/micropython,selste/micropython,henriknelson/micropython,adafruit/micropython,noahwilliamsson/micropython,praemdonck/micropython,neilh10/micropython,vriera/micropython,SungEun-Steve-Kim/test-mp,SHA2017-badge/micropython-esp32,adamkh/micropython,jimkmc/micropython,micropython/micropython-esp32,oopy/micropython,ganshun666/micropython,Vogtinator/micropython,kerneltask/micropython,vitiral/micropython,adamkh/micropython,kostyll/micropython,jmarcelino/pycom-micropython,orionrobots/micropython,orionrobots/micropython,aethaniel/micropython,hiway/micropython,suda/micropython,puuu/micropython,vriera/micropython,noahchense/micropython,cwyark/micropython,ryannathans/micropython,xuxiaoxin/micropython,MrSurly/micropython-esp32,jmarcelino/pycom-micropython,blazewicz/micropython,rubencabrera/micropython,pfalcon/micropython,cwyark/micropython,suda/micropython,ericsnowcurrently/micropython,praemdonck/micropython,mpalomer/micropython,emfcamp/micropython,tralamazza/micropython,hosaka/micropython,ganshun666/micropython,chrisdearman/micropython,pramasoul/micropython,EcmaXp/micropython,EcmaXp/micropython,redbear/micropython,jmarcelino/pycom-micropython,paul-xxx/micropython,cloudformdesign/micropython,dinau/micropython,lowRISC/micropython,Vogtinator/micropython,HenrikSolver/micropython,KISSMonX/micropython,puuu/micropython,torwag/micropython,toolmacher/micropython,blmorris/micropython,dinau/micropython,dhylands/micropython,alex-robbins/micropython,jmarcelino/pycom-micropython,PappaPeppar/micropython,KISSMonX/micropython,mhoffma/micropython,cnoviello/micropython,supergis/micropython,xyb/micropython,kerneltask/micropython,hiway/micropython,Timmenem/micropython,MrSurly/micropython,warner83/micropython,ericsnowcurrently/micropython,neilh10/micropython,emfcamp/micropython,MrSurly/micropython,ericsnowcurrently/micropython,blazewicz/micropython,utopiaprince/micropython,matthewelse/micropython,heisewangluo/micropython,xuxiaoxin/micropython,mist
erdanb/micropython,ryannathans/micropython,deshipu/micropython,Timmenem/micropython,blazewicz/micropython,pramasoul/micropython,ernesto-g/micropython,utopiaprince/micropython,mpalomer/micropython,selste/micropython,cnoviello/micropython,ryannathans/micropython,ceramos/micropython,stonegithubs/micropython,swegener/micropython,PappaPeppar/micropython,turbinenreiter/micropython,tdautc19841202/micropython,selste/micropython,tuc-osg/micropython,pfalcon/micropython,danicampora/micropython,trezor/micropython,trezor/micropython,oopy/micropython,misterdanb/micropython,noahchense/micropython,lbattraw/micropython,mpalomer/micropython,danicampora/micropython,warner83/micropython,stonegithubs/micropython,AriZuu/micropython,mianos/micropython,mgyenik/micropython,dinau/micropython,selste/micropython,suda/micropython,jlillest/micropython,neilh10/micropython,supergis/micropython,xuxiaoxin/micropython,TDAbboud/micropython,adafruit/circuitpython,lowRISC/micropython,dxxb/micropython,alex-robbins/micropython,feilongfl/micropython,kerneltask/micropython,noahwilliamsson/micropython,jimkmc/micropython,jlillest/micropython,trezor/micropython,turbinenreiter/micropython,redbear/micropython,xyb/micropython,ceramos/micropython,trezor/micropython,dhylands/micropython,vriera/micropython,noahchense/micropython,MrSurly/micropython-esp32,dhylands/micropython,orionrobots/micropython,alex-robbins/micropython,paul-xxx/micropython,galenhz/micropython,MrSurly/micropython,turbinenreiter/micropython,vriera/micropython,aethaniel/micropython,Timmenem/micropython,ryannathans/micropython,adafruit/circuitpython,ceramos/micropython,TDAbboud/micropython,jimkmc/micropython,cwyark/micropython,hiway/micropython,paul-xxx/micropython,mhoffma/micropython,dinau/micropython,pfalcon/micropython,ChuckM/micropython,hosaka/micropython,henriknelson/micropython,paul-xxx/micropython,martinribelotta/micropython,redbear/micropython,heisewangluo/micropython,cwyark/micropython,cloudformdesign/micropython,noahwilliamsson/micropython,EcmaXp/micropython,TDAbboud/micropython,xhat/micropython,neilh10/micropython,supergis/micropython,jmarcelino/pycom-micropython,matthewelse/micropython,hiway/micropython,cnoviello/micropython,alex-robbins/micropython,ceramos/micropython,ruffy91/micropython,torwag/micropython,micropython/micropython-esp32,adafruit/micropython,xyb/micropython,deshipu/micropython,ChuckM/micropython,PappaPeppar/micropython,cloudformdesign/micropython,supergis/micropython,adafruit/circuitpython,stonegithubs/micropython,tuc-osg/micropython,micropython/micropython-esp32,omtinez/micropython,jimkmc/micropython,AriZuu/micropython,EcmaXp/micropython,bvernoux/micropython,tobbad/micropython,pramasoul/micropython,martinribelotta/micropython,tuc-osg/micropython,ericsnowcurrently/micropython,firstval/micropython,hiway/micropython,omtinez/micropython,tobbad/micropython,jlillest/micropython,infinnovation/micropython,ahotam/micropython,firstval/micropython,KISSMonX/micropython,pozetroninc/micropython,adafruit/micropython,oopy/micropython,henriknelson/micropython,Peetz0r/micropython-esp32,tdautc19841202/micropython,omtinez/micropython,dxxb/micropython,Peetz0r/micropython-esp32,Peetz0r/micropython-esp32,mgyenik/micropython,SHA2017-badge/micropython-esp32,puuu/micropython,drrk/micropython,praemdonck/micropython,vitiral/micropython,EcmaXp/micropython,redbear/micropython,kostyll/micropython,blazewicz/micropython,matthewelse/micropython,ruffy91/micropython,PappaPeppar/micropython,xuxiaoxin/micropython,stonegithubs/micropython,pfalcon/micropython,dxxb/micropython,henrikne
lson/micropython,kostyll/micropython,chrisdearman/micropython,mhoffma/micropython,kostyll/micropython,suda/micropython,utopiaprince/micropython,micropython/micropython-esp32,AriZuu/micropython,xhat/micropython,emfcamp/micropython,ahotam/micropython,KISSMonX/micropython,chrisdearman/micropython,HenrikSolver/micropython,pramasoul/micropython,TDAbboud/micropython,puuu/micropython,warner83/micropython,blmorris/micropython,oopy/micropython,HenrikSolver/micropython,martinribelotta/micropython,tobbad/micropython,mgyenik/micropython,mianos/micropython,lowRISC/micropython,pozetroninc/micropython,tdautc19841202/micropython,misterdanb/micropython,paul-xxx/micropython,bvernoux/micropython,tralamazza/micropython,Vogtinator/micropython,dmazzella/micropython,MrSurly/micropython-esp32,slzatz/micropython,mhoffma/micropython,MrSurly/micropython,selste/micropython,hosaka/micropython,lbattraw/micropython,emfcamp/micropython,bvernoux/micropython,lowRISC/micropython,cnoviello/micropython,ruffy91/micropython,SHA2017-badge/micropython-esp32,warner83/micropython,slzatz/micropython,galenhz/micropython,slzatz/micropython,feilongfl/micropython,Vogtinator/micropython,cloudformdesign/micropython,blmorris/micropython,martinribelotta/micropython,rubencabrera/micropython,mianos/micropython,aethaniel/micropython,henriknelson/micropython,puuu/micropython,SungEun-Steve-Kim/test-mp,skybird6672/micropython,stonegithubs/micropython,drrk/micropython,SungEun-Steve-Kim/test-mp,omtinez/micropython,HenrikSolver/micropython,tralamazza/micropython,dhylands/micropython,ChuckM/micropython,pramasoul/micropython,rubencabrera/micropython,Peetz0r/micropython-esp32,adafruit/micropython,ernesto-g/micropython,dxxb/micropython,pfalcon/micropython,adafruit/circuitpython,toolmacher/micropython,swegener/micropython,KISSMonX/micropython,ahotam/micropython,danicampora/micropython,adafruit/circuitpython,dmazzella/micropython,TDAbboud/micropython,drrk/micropython,torwag/micropython,cnoviello/micropython,feilongfl/micropython,mhoffma/micropython,cwyark/micropython,aethaniel/micropython,matthewelse/micropython,supergis/micropython,xhat/micropython,danicampora/micropython,torwag/micropython,feilongfl/micropython,infinnovation/micropython,AriZuu/micropython,MrSurly/micropython-esp32,toolmacher/micropython,tralamazza/micropython,infinnovation/micropython,galenhz/micropython,feilongfl/micropython,matthewelse/micropython,alex-march/micropython,misterdanb/micropython,mpalomer/micropython,dhylands/micropython,oopy/micropython,ruffy91/micropython,deshipu/micropython,hosaka/micropython,kerneltask/micropython,firstval/micropython,chrisdearman/micropython,vitiral/micropython,dmazzella/micropython,turbinenreiter/micropython,AriZuu/micropython,deshipu/micropython,xhat/micropython,mpalomer/micropython,Peetz0r/micropython-esp32,skybird6672/micropython,rubencabrera/micropython,ceramos/micropython,drrk/micropython,adamkh/micropython,mianos/micropython,tuc-osg/micropython,jlillest/micropython,xhat/micropython,skybird6672/micropython,MrSurly/micropython-esp32,SungEun-Steve-Kim/test-mp,matthewelse/micropython,vriera/micropython,cloudformdesign/micropython,Timmenem/micropython,galenhz/micropython,noahchense/micropython,utopiaprince/micropython,omtinez/micropython,firstval/micropython,ernesto-g/micropython,MrSurly/micropython,alex-march/micropython,mgyenik/micropython,deshipu/micropython,tdautc19841202/micropython,noahchense/micropython,micropython/micropython-esp32,aethaniel/micropython,tobbad/micropython,noahwilliamsson/micropython,infinnovation/micropython,alex-march/micro
python,swegener/micropython,utopiaprince/micropython,skybird6672/micropython,heisewangluo/micropython,bvernoux/micropython,pozetroninc/micropython,vitiral/micropython,lbattraw/micropython,ryannathans/micropython,noahwilliamsson/micropython,orionrobots/micropython,lbattraw/micropython,trezor/micropython,ganshun666/micropython,ernesto-g/micropython,ganshun666/micropython,emfcamp/micropython
|
tests: Add test for semantics of for-loop that optimisation can break.
|
# test assigning to iterator within the loop
for i in range(2):
print(i)
i = 2
# test assigning to range parameter within the loop
# (since we optimise for loops, this needs checking, currently it fails)
#n = 2
#for i in range(n):
# print(i)
# n = 0
|
<commit_before><commit_msg>tests: Add test for semantics of for-loop that optimisation can break.<commit_after>
|
# test assigning to iterator within the loop
for i in range(2):
print(i)
i = 2
# test assigning to range parameter within the loop
# (since we optimise for loops, this needs checking, currently it fails)
#n = 2
#for i in range(n):
# print(i)
# n = 0
|
tests: Add test for semantics of for-loop that optimisation can break.# test assigning to iterator within the loop
for i in range(2):
print(i)
i = 2
# test assigning to range parameter within the loop
# (since we optimise for loops, this needs checking, currently it fails)
#n = 2
#for i in range(n):
# print(i)
# n = 0
|
<commit_before><commit_msg>tests: Add test for semantics of for-loop that optimisation can break.<commit_after># test assigning to iterator within the loop
for i in range(2):
print(i)
i = 2
# test assigning to range parameter within the loop
# (since we optimise for loops, this needs checking, currently it fails)
#n = 2
#for i in range(n):
# print(i)
# n = 0
|
|
08a3317d577e0ee5dfa07f8a81b7a4a018297b4a
|
dstar-lite/scripts/python_pipe.py
|
dstar-lite/scripts/python_pipe.py
|
import csv
import numpy as np
import scipy.io as sio
def process_gridworld_data(input, imsize):
# run training from input matlab data file, and save test data prediction in output file
# load data from Matlab file, including
# im_data: flattened images
# state_data: concatenated one-hot vectors for each state variable
# state_xy_data: state variable (x,y position)
# label_data: one-hot vector for action (state difference)
im_size = [imsize, imsize]
matlab_data = sio.loadmat(input)
im_data = matlab_data["batch_im_data"]
im_data = (im_data - 1)/255 # obstacles = 1, free zone = 0
value_data = matlab_data["batch_value_data"]
state1_data = matlab_data["state_x_data"]
state2_data = matlab_data["state_y_data"]
label_data = matlab_data["batch_label_data"]
ydata = label_data.astype('int8')
Xim_data = im_data.astype('float32')
Xim_data = Xim_data.reshape(-1, 1, im_size[0], im_size[1])
Xval_data = value_data.astype('float32')
Xval_data = Xval_data.reshape(-1, 1, im_size[0], im_size[1])
Xdata = np.append(Xim_data, Xval_data, axis=1)
# Need to transpose because Theano is NCHW, while TensorFlow is NHWC
Xdata = np.transpose(Xdata, (0, 2, 3, 1))
S1data = state1_data.astype('int8')
S2data = state2_data.astype('int8')
return im_data
float_formatter = lambda x: "%.1d" % x
im_data = process_gridworld_data("../resources/gridworld_8.mat", 8)
i = 0
im_formatted = []
for line in im_data[1]
if float_formatter(line) != "":
im_formatted.append(float_formatter(line))
i = i +1
import pdb; pdb.set_trace() # breakpoint ec7f2b0e //
print(im_data)
with open('../resources/gridworld_8.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=' ',
quotechar='', quoting=csv.QUOTE_NONE)
writer.writerows(im_formatted)
|
Create a script to parse .mat and write .csv file
|
Create a script to parse .mat and write .csv file
|
Python
|
mit
|
ToniRV/Learning-to-navigate-without-a-map,ToniRV/Learning-to-navigate-without-a-map
|
Create a script to parse .mat and write .csv file
|
import csv
import numpy as np
import scipy.io as sio
def process_gridworld_data(input, imsize):
# run training from input matlab data file, and save test data prediction in output file
# load data from Matlab file, including
# im_data: flattened images
# state_data: concatenated one-hot vectors for each state variable
# state_xy_data: state variable (x,y position)
# label_data: one-hot vector for action (state difference)
im_size = [imsize, imsize]
matlab_data = sio.loadmat(input)
im_data = matlab_data["batch_im_data"]
im_data = (im_data - 1)/255 # obstacles = 1, free zone = 0
value_data = matlab_data["batch_value_data"]
state1_data = matlab_data["state_x_data"]
state2_data = matlab_data["state_y_data"]
label_data = matlab_data["batch_label_data"]
ydata = label_data.astype('int8')
Xim_data = im_data.astype('float32')
Xim_data = Xim_data.reshape(-1, 1, im_size[0], im_size[1])
Xval_data = value_data.astype('float32')
Xval_data = Xval_data.reshape(-1, 1, im_size[0], im_size[1])
Xdata = np.append(Xim_data, Xval_data, axis=1)
# Need to transpose because Theano is NCHW, while TensorFlow is NHWC
Xdata = np.transpose(Xdata, (0, 2, 3, 1))
S1data = state1_data.astype('int8')
S2data = state2_data.astype('int8')
return im_data
float_formatter = lambda x: "%.1d" % x
im_data = process_gridworld_data("../resources/gridworld_8.mat", 8)
i = 0
im_formatted = []
for line in im_data[1]
if float_formatter(line) != "":
im_formatted.append(float_formatter(line))
i = i +1
import pdb; pdb.set_trace() # breakpoint ec7f2b0e //
print(im_data)
with open('../resources/gridworld_8.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=' ',
quotechar='', quoting=csv.QUOTE_NONE)
writer.writerows(im_formatted)
|
<commit_before><commit_msg>Create a script to parse .mat and write .csv file<commit_after>
|
import csv
import numpy as np
import scipy.io as sio
def process_gridworld_data(input, imsize):
# run training from input matlab data file, and save test data prediction in output file
# load data from Matlab file, including
# im_data: flattened images
# state_data: concatenated one-hot vectors for each state variable
# state_xy_data: state variable (x,y position)
# label_data: one-hot vector for action (state difference)
im_size = [imsize, imsize]
matlab_data = sio.loadmat(input)
im_data = matlab_data["batch_im_data"]
im_data = (im_data - 1)/255 # obstacles = 1, free zone = 0
value_data = matlab_data["batch_value_data"]
state1_data = matlab_data["state_x_data"]
state2_data = matlab_data["state_y_data"]
label_data = matlab_data["batch_label_data"]
ydata = label_data.astype('int8')
Xim_data = im_data.astype('float32')
Xim_data = Xim_data.reshape(-1, 1, im_size[0], im_size[1])
Xval_data = value_data.astype('float32')
Xval_data = Xval_data.reshape(-1, 1, im_size[0], im_size[1])
Xdata = np.append(Xim_data, Xval_data, axis=1)
# Need to transpose because Theano is NCHW, while TensorFlow is NHWC
Xdata = np.transpose(Xdata, (0, 2, 3, 1))
S1data = state1_data.astype('int8')
S2data = state2_data.astype('int8')
return im_data
float_formatter = lambda x: "%.1d" % x
im_data = process_gridworld_data("../resources/gridworld_8.mat", 8)
i = 0
im_formatted = []
for line in im_data[1]
if float_formatter(line) != "":
im_formatted.append(float_formatter(line))
i = i +1
import pdb; pdb.set_trace() # breakpoint ec7f2b0e //
print(im_data)
with open('../resources/gridworld_8.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=' ',
quotechar='', quoting=csv.QUOTE_NONE)
writer.writerows(im_formatted)
|
Create a script to parse .mat and write .csv file
import csv
import numpy as np
import scipy.io as sio
def process_gridworld_data(input, imsize):
# run training from input matlab data file, and save test data prediction in output file
# load data from Matlab file, including
# im_data: flattened images
# state_data: concatenated one-hot vectors for each state variable
# state_xy_data: state variable (x,y position)
# label_data: one-hot vector for action (state difference)
im_size = [imsize, imsize]
matlab_data = sio.loadmat(input)
im_data = matlab_data["batch_im_data"]
im_data = (im_data - 1)/255 # obstacles = 1, free zone = 0
value_data = matlab_data["batch_value_data"]
state1_data = matlab_data["state_x_data"]
state2_data = matlab_data["state_y_data"]
label_data = matlab_data["batch_label_data"]
ydata = label_data.astype('int8')
Xim_data = im_data.astype('float32')
Xim_data = Xim_data.reshape(-1, 1, im_size[0], im_size[1])
Xval_data = value_data.astype('float32')
Xval_data = Xval_data.reshape(-1, 1, im_size[0], im_size[1])
Xdata = np.append(Xim_data, Xval_data, axis=1)
# Need to transpose because Theano is NCHW, while TensorFlow is NHWC
Xdata = np.transpose(Xdata, (0, 2, 3, 1))
S1data = state1_data.astype('int8')
S2data = state2_data.astype('int8')
return im_data
float_formatter = lambda x: "%.1d" % x
im_data = process_gridworld_data("../resources/gridworld_8.mat", 8)
i = 0
im_formatted = []
for line in im_data[1]
if float_formatter(line) != "":
im_formatted.append(float_formatter(line))
i = i +1
import pdb; pdb.set_trace() # breakpoint ec7f2b0e //
print(im_data)
with open('../resources/gridworld_8.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=' ',
quotechar='', quoting=csv.QUOTE_NONE)
writer.writerows(im_formatted)
|
<commit_before><commit_msg>Create a script to parse .mat and write .csv file<commit_after>
import csv
import numpy as np
import scipy.io as sio
def process_gridworld_data(input, imsize):
# run training from input matlab data file, and save test data prediction in output file
# load data from Matlab file, including
# im_data: flattened images
# state_data: concatenated one-hot vectors for each state variable
# state_xy_data: state variable (x,y position)
# label_data: one-hot vector for action (state difference)
im_size = [imsize, imsize]
matlab_data = sio.loadmat(input)
im_data = matlab_data["batch_im_data"]
im_data = (im_data - 1)/255 # obstacles = 1, free zone = 0
value_data = matlab_data["batch_value_data"]
state1_data = matlab_data["state_x_data"]
state2_data = matlab_data["state_y_data"]
label_data = matlab_data["batch_label_data"]
ydata = label_data.astype('int8')
Xim_data = im_data.astype('float32')
Xim_data = Xim_data.reshape(-1, 1, im_size[0], im_size[1])
Xval_data = value_data.astype('float32')
Xval_data = Xval_data.reshape(-1, 1, im_size[0], im_size[1])
Xdata = np.append(Xim_data, Xval_data, axis=1)
# Need to transpose because Theano is NCHW, while TensorFlow is NHWC
Xdata = np.transpose(Xdata, (0, 2, 3, 1))
S1data = state1_data.astype('int8')
S2data = state2_data.astype('int8')
return im_data
float_formatter = lambda x: "%.1d" % x
im_data = process_gridworld_data("../resources/gridworld_8.mat", 8)
i = 0
im_formatted = []
for line in im_data[1]
if float_formatter(line) != "":
im_formatted.append(float_formatter(line))
i = i +1
import pdb; pdb.set_trace() # breakpoint ec7f2b0e //
print(im_data)
with open('../resources/gridworld_8.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=' ',
quotechar='', quoting=csv.QUOTE_NONE)
writer.writerows(im_formatted)
|
|
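Editorial note on the record above: as committed, the script has a loop header without a colon (`for line in im_data[1]`) and leaves a pdb breakpoint in place, so it will not run as-is. Below is a minimal runnable sketch of the same grid-to-CSV step; the array shape, the stand-in data, and the output file name are assumptions for illustration only.

# Minimal sketch of dumping one flattened 8x8 grid as space-separated digits.
# im_data here is a random stand-in for the array loaded from the .mat file.
import csv
import numpy as np

im_data = np.random.randint(0, 2, size=(10, 64)).astype("float32")

row = ["%d" % value for value in im_data[1]]  # format each cell as a single digit
with open("gridworld_8_sample.csv", "w", newline="") as csvfile:
    writer = csv.writer(csvfile, delimiter=" ")
    writer.writerow(row)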
83e036f3d89c4b3956bde006085becb496a1fb6e
|
dbscan/test.py
|
dbscan/test.py
|
'''
Generate dummy data, and compare output from scikit-learn's DBSCAN.
Example code based on:
http://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan.html#sphx-glr-auto-examples-cluster-plot-dbscan-py
Run with pytest, e.g.:
py.test test.py
'''
import os
import shutil
import subprocess
from sklearn.cluster import DBSCAN
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
import pandas as pd
EPS = .1
MIN_SAMPLES = 3
def test_compare():
'''Compare result of our DBSCAN to scikit-learn
'''
# Make temp directory for dumping intermediate files
os.mkdir('tmp')
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1], [-3, 3], [3, 3]]
X, _ = make_blobs(
n_samples=1000, centers=centers, cluster_std=0.3, random_state=0)
X = StandardScaler().fit_transform(X)
# Write sample data
pd.DataFrame(X).to_csv('tmp/sample_data.csv', header=None, index=False)
# Compute our DBSCAN
# Run in a python subprocess which sends a few lines of q into the stdin
# of a q interpreter. Assumed to run in same directory as dbscan.q module.
subprocess.run(f'''echo ' \
system "l dbscan.q"; \
t:value each flip `x`y!("FF";",") 0: `$"tmp/sample_data.csv"; \
d:dbscan[t;{MIN_SAMPLES};{EPS}]; \
(`:tmp/q.csv) 0: .h.tx[`csv;flip enlist[`labels]!enlist[d]]' | \
$QHOME/m32/q -q''', shell=True, stdout=subprocess.DEVNULL)
qlabels = pd.read_csv('tmp/q.csv')['labels']
# Compute scikit-learn's DBSCAN
db = DBSCAN(eps=EPS, min_samples=MIN_SAMPLES).fit(X)
pylabels = db.labels_
# Compare
assert (qlabels == pylabels).all()
# Cleanup temp directory
shutil.rmtree('tmp')
|
Add python / sklearn comparison script
|
[dbscan] Add python / sklearn comparison script
|
Python
|
mit
|
jlas/ml.q
|
[dbscan] Add python / sklearn comparison script
|
'''
Generate dummy data, and compare output from scikit-learn's DBSCAN.
Example code based on:
http://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan.html#sphx-glr-auto-examples-cluster-plot-dbscan-py
Run with pytest, e.g.:
py.test test.py
'''
import os
import shutil
import subprocess
from sklearn.cluster import DBSCAN
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
import pandas as pd
EPS = .1
MIN_SAMPLES = 3
def test_compare():
'''Compare result of our DBSCAN to scikit-learn
'''
# Make temp directory for dumping intermediate files
os.mkdir('tmp')
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1], [-3, 3], [3, 3]]
X, _ = make_blobs(
n_samples=1000, centers=centers, cluster_std=0.3, random_state=0)
X = StandardScaler().fit_transform(X)
# Write sample data
pd.DataFrame(X).to_csv('tmp/sample_data.csv', header=None, index=False)
# Compute our DBSCAN
# Run in a python subprocess which sends a few lines of q into the stdin
# of a q interpreter. Assumed to run in same directory as dbscan.q module.
subprocess.run(f'''echo ' \
system "l dbscan.q"; \
t:value each flip `x`y!("FF";",") 0: `$"tmp/sample_data.csv"; \
d:dbscan[t;{MIN_SAMPLES};{EPS}]; \
(`:tmp/q.csv) 0: .h.tx[`csv;flip enlist[`labels]!enlist[d]]' | \
$QHOME/m32/q -q''', shell=True, stdout=subprocess.DEVNULL)
qlabels = pd.read_csv('tmp/q.csv')['labels']
# Compute scikit-learn's DBSCAN
db = DBSCAN(eps=EPS, min_samples=MIN_SAMPLES).fit(X)
pylabels = db.labels_
# Compare
assert (qlabels == pylabels).all()
# Cleanup temp directory
shutil.rmtree('tmp')
|
<commit_before><commit_msg>[dbscan] Add python / sklearn comparison script<commit_after>
|
'''
Generate dummy data, and compare output from scikit-learn's DBSCAN.
Example code based on:
http://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan.html#sphx-glr-auto-examples-cluster-plot-dbscan-py
Run with pytest, e.g.:
py.test test.py
'''
import os
import shutil
import subprocess
from sklearn.cluster import DBSCAN
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
import pandas as pd
EPS = .1
MIN_SAMPLES = 3
def test_compare():
'''Compare result of our DBSCAN to scikit-learn
'''
# Make temp directory for dumping intermediate files
os.mkdir('tmp')
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1], [-3, 3], [3, 3]]
X, _ = make_blobs(
n_samples=1000, centers=centers, cluster_std=0.3, random_state=0)
X = StandardScaler().fit_transform(X)
# Write sample data
pd.DataFrame(X).to_csv('tmp/sample_data.csv', header=None, index=False)
# Compute our DBSCAN
# Run in a python subprocess which sends a few lines of q into the stdin
# of a q interpreter. Assumed to run in same directory as dbscan.q module.
subprocess.run(f'''echo ' \
system "l dbscan.q"; \
t:value each flip `x`y!("FF";",") 0: `$"tmp/sample_data.csv"; \
d:dbscan[t;{MIN_SAMPLES};{EPS}]; \
(`:tmp/q.csv) 0: .h.tx[`csv;flip enlist[`labels]!enlist[d]]' | \
$QHOME/m32/q -q''', shell=True, stdout=subprocess.DEVNULL)
qlabels = pd.read_csv('tmp/q.csv')['labels']
# Compute scikit-learn's DBSCAN
db = DBSCAN(eps=EPS, min_samples=MIN_SAMPLES).fit(X)
pylabels = db.labels_
# Compare
assert (qlabels == pylabels).all()
# Cleanup temp directory
shutil.rmtree('tmp')
|
[dbscan] Add python / sklearn comparison script'''
Generate dummy data, and compare output from scikit-learn's DBSCAN.
Example code based on:
http://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan.html#sphx-glr-auto-examples-cluster-plot-dbscan-py
Run with pytest, e.g.:
py.test test.py
'''
import os
import shutil
import subprocess
from sklearn.cluster import DBSCAN
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
import pandas as pd
EPS = .1
MIN_SAMPLES = 3
def test_compare():
'''Compare result of our DBSCAN to scikit-learn
'''
# Make temp directory for dumping intermediate files
os.mkdir('tmp')
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1], [-3, 3], [3, 3]]
X, _ = make_blobs(
n_samples=1000, centers=centers, cluster_std=0.3, random_state=0)
X = StandardScaler().fit_transform(X)
# Write sample data
pd.DataFrame(X).to_csv('tmp/sample_data.csv', header=None, index=False)
# Compute our DBSCAN
# Run in a python subprocess which sends a few lines of q into the stdin
# of a q interpreter. Assumed to run in same directory as dbscan.q module.
subprocess.run(f'''echo ' \
system "l dbscan.q"; \
t:value each flip `x`y!("FF";",") 0: `$"tmp/sample_data.csv"; \
d:dbscan[t;{MIN_SAMPLES};{EPS}]; \
(`:tmp/q.csv) 0: .h.tx[`csv;flip enlist[`labels]!enlist[d]]' | \
$QHOME/m32/q -q''', shell=True, stdout=subprocess.DEVNULL)
qlabels = pd.read_csv('tmp/q.csv')['labels']
# Compute scikit-learn's DBSCAN
db = DBSCAN(eps=EPS, min_samples=MIN_SAMPLES).fit(X)
pylabels = db.labels_
# Compare
assert (qlabels == pylabels).all()
# Cleanup temp directory
shutil.rmtree('tmp')
|
<commit_before><commit_msg>[dbscan] Add python / sklearn comparison script<commit_after>'''
Generate dummy data, and compare output from scikit-learn's DBSCAN.
Example code based on:
http://scikit-learn.org/stable/auto_examples/cluster/plot_dbscan.html#sphx-glr-auto-examples-cluster-plot-dbscan-py
Run with pytest, e.g.:
py.test test.py
'''
import os
import shutil
import subprocess
from sklearn.cluster import DBSCAN
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
import pandas as pd
EPS = .1
MIN_SAMPLES = 3
def test_compare():
'''Compare result of our DBSCAN to scikit-learn
'''
# Make temp directory for dumping intermediate files
os.mkdir('tmp')
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1], [-3, 3], [3, 3]]
X, _ = make_blobs(
n_samples=1000, centers=centers, cluster_std=0.3, random_state=0)
X = StandardScaler().fit_transform(X)
# Write sample data
pd.DataFrame(X).to_csv('tmp/sample_data.csv', header=None, index=False)
# Compute our DBSCAN
# Run in a python subprocess which sends a few lines of q into the stdin
# of a q interpreter. Assumed to run in same directory as dbscan.q module.
subprocess.run(f'''echo ' \
system "l dbscan.q"; \
t:value each flip `x`y!("FF";",") 0: `$"tmp/sample_data.csv"; \
d:dbscan[t;{MIN_SAMPLES};{EPS}]; \
(`:tmp/q.csv) 0: .h.tx[`csv;flip enlist[`labels]!enlist[d]]' | \
$QHOME/m32/q -q''', shell=True, stdout=subprocess.DEVNULL)
qlabels = pd.read_csv('tmp/q.csv')['labels']
# Compute scikit-learn's DBSCAN
db = DBSCAN(eps=EPS, min_samples=MIN_SAMPLES).fit(X)
pylabels = db.labels_
# Compare
assert (qlabels == pylabels).all()
# Cleanup temp directory
shutil.rmtree('tmp')
|
|
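Editorial note on the record above: the q invocation is built as a long `echo '...' | q` shell string. As a hedged alternative, the q program can be fed to the interpreter's stdin via subprocess's input= argument, which avoids shell quoting. The q binary location and the assumption that q evaluates the script line by line from stdin are mine, not the repository's.

# Alternative sketch: feed the q script to the interpreter over stdin instead
# of echo | q. The QHOME/m32/q path and line-by-line evaluation are assumptions.
import os
import subprocess

Q_SCRIPT = (
    'system "l dbscan.q";\n'
    't:value each flip `x`y!("FF";",") 0: `$"tmp/sample_data.csv";\n'
    'd:dbscan[t;3;0.1];\n'
    '(`:tmp/q.csv) 0: .h.tx[`csv;flip enlist[`labels]!enlist[d]];\n'
)

q_binary = os.path.join(os.environ.get("QHOME", "."), "m32", "q")
subprocess.run([q_binary, "-q"], input=Q_SCRIPT, text=True,
               stdout=subprocess.DEVNULL, check=True)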
fe6d4383a942eb85e3062f35f5b6d073d92b1cc2
|
tests/test_pgtune.py
|
tests/test_pgtune.py
|
# coding: utf-8
"""
Test suite for PgTune.
"""
from unittest.mock import MagicMock, patch
import pytest
import smdba.postgresqlgate
class TestPgTune:
"""
Test PgTune class.
"""
def test_estimate(self):
"""
Test estimation.
:return:
"""
popen = MagicMock()
popen().read = MagicMock(return_value="11.2")
with patch("smdba.postgresqlgate.os.popen", popen):
pgtune = smdba.postgresqlgate.PgTune(10)
pgtune.get_total_memory = MagicMock(return_value=0x1e0384000)
pgtune.estimate()
assert pgtune.config['shared_buffers'] == '1920MB'
assert pgtune.config['effective_cache_size'] == '5632MB'
assert pgtune.config['work_mem'] == '768MB'
assert pgtune.config['maintenance_work_mem'] == '480MB'
assert pgtune.config['max_wal_size'] == '384MB'
assert pgtune.config['checkpoint_completion_target'] == '0.7'
assert pgtune.config['wal_buffers'] == '4MB'
assert pgtune.config['constraint_exclusion'] == 'off'
assert pgtune.config['max_connections'] == 10
assert pgtune.config['cpu_tuple_cost'] == '0.5'
|
Add unit test for tuning estimations
|
Add unit test for tuning estimations
|
Python
|
mit
|
SUSE/smdba,SUSE/smdba
|
Add unit test for tuning estimations
|
# coding: utf-8
"""
Test suite for PgTune.
"""
from unittest.mock import MagicMock, patch
import pytest
import smdba.postgresqlgate
class TestPgTune:
"""
Test PgTune class.
"""
def test_estimate(self):
"""
Test estimation.
:return:
"""
popen = MagicMock()
popen().read = MagicMock(return_value="11.2")
with patch("smdba.postgresqlgate.os.popen", popen):
pgtune = smdba.postgresqlgate.PgTune(10)
pgtune.get_total_memory = MagicMock(return_value=0x1e0384000)
pgtune.estimate()
assert pgtune.config['shared_buffers'] == '1920MB'
assert pgtune.config['effective_cache_size'] == '5632MB'
assert pgtune.config['work_mem'] == '768MB'
assert pgtune.config['maintenance_work_mem'] == '480MB'
assert pgtune.config['max_wal_size'] == '384MB'
assert pgtune.config['checkpoint_completion_target'] == '0.7'
assert pgtune.config['wal_buffers'] == '4MB'
assert pgtune.config['constraint_exclusion'] == 'off'
assert pgtune.config['max_connections'] == 10
assert pgtune.config['cpu_tuple_cost'] == '0.5'
|
<commit_before><commit_msg>Add unit test for tuning estimations<commit_after>
|
# coding: utf-8
"""
Test suite for PgTune.
"""
from unittest.mock import MagicMock, patch
import pytest
import smdba.postgresqlgate
class TestPgTune:
"""
Test PgTune class.
"""
def test_estimate(self):
"""
Test estimation.
:return:
"""
popen = MagicMock()
popen().read = MagicMock(return_value="11.2")
with patch("smdba.postgresqlgate.os.popen", popen):
pgtune = smdba.postgresqlgate.PgTune(10)
pgtune.get_total_memory = MagicMock(return_value=0x1e0384000)
pgtune.estimate()
assert pgtune.config['shared_buffers'] == '1920MB'
assert pgtune.config['effective_cache_size'] == '5632MB'
assert pgtune.config['work_mem'] == '768MB'
assert pgtune.config['maintenance_work_mem'] == '480MB'
assert pgtune.config['max_wal_size'] == '384MB'
assert pgtune.config['checkpoint_completion_target'] == '0.7'
assert pgtune.config['wal_buffers'] == '4MB'
assert pgtune.config['constraint_exclusion'] == 'off'
assert pgtune.config['max_connections'] == 10
assert pgtune.config['cpu_tuple_cost'] == '0.5'
|
Add unit test for tuning estimations# coding: utf-8
"""
Test suite for PgTune.
"""
from unittest.mock import MagicMock, patch
import pytest
import smdba.postgresqlgate
class TestPgTune:
"""
Test PgTune class.
"""
def test_estimate(self):
"""
Test estimation.
:return:
"""
popen = MagicMock()
popen().read = MagicMock(return_value="11.2")
with patch("smdba.postgresqlgate.os.popen", popen):
pgtune = smdba.postgresqlgate.PgTune(10)
pgtune.get_total_memory = MagicMock(return_value=0x1e0384000)
pgtune.estimate()
assert pgtune.config['shared_buffers'] == '1920MB'
assert pgtune.config['effective_cache_size'] == '5632MB'
assert pgtune.config['work_mem'] == '768MB'
assert pgtune.config['maintenance_work_mem'] == '480MB'
assert pgtune.config['max_wal_size'] == '384MB'
assert pgtune.config['checkpoint_completion_target'] == '0.7'
assert pgtune.config['wal_buffers'] == '4MB'
assert pgtune.config['constraint_exclusion'] == 'off'
assert pgtune.config['max_connections'] == 10
assert pgtune.config['cpu_tuple_cost'] == '0.5'
|
<commit_before><commit_msg>Add unit test for tuning estimations<commit_after># coding: utf-8
"""
Test suite for PgTune.
"""
from unittest.mock import MagicMock, patch
import pytest
import smdba.postgresqlgate
class TestPgTune:
"""
Test PgTune class.
"""
def test_estimate(self):
"""
Test estimation.
:return:
"""
popen = MagicMock()
popen().read = MagicMock(return_value="11.2")
with patch("smdba.postgresqlgate.os.popen", popen):
pgtune = smdba.postgresqlgate.PgTune(10)
pgtune.get_total_memory = MagicMock(return_value=0x1e0384000)
pgtune.estimate()
assert pgtune.config['shared_buffers'] == '1920MB'
assert pgtune.config['effective_cache_size'] == '5632MB'
assert pgtune.config['work_mem'] == '768MB'
assert pgtune.config['maintenance_work_mem'] == '480MB'
assert pgtune.config['max_wal_size'] == '384MB'
assert pgtune.config['checkpoint_completion_target'] == '0.7'
assert pgtune.config['wal_buffers'] == '4MB'
assert pgtune.config['constraint_exclusion'] == 'off'
assert pgtune.config['max_connections'] == 10
assert pgtune.config['cpu_tuple_cost'] == '0.5'
|
|
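Editorial note on the record above: the test works because os.popen and get_total_memory are replaced before estimate() runs, so no PostgreSQL installation is needed. Here is a generic, self-contained sketch of that patching pattern; read_version is a toy function written for this note, not smdba code.

# Generic illustration of patching os.popen so version detection needs no
# real PostgreSQL binary.
from unittest.mock import MagicMock, patch

def read_version():
    import os
    return os.popen("pg_config --version").read().strip()

def test_read_version_without_postgres():
    popen = MagicMock()
    popen.return_value.read.return_value = "11.2\n"
    with patch("os.popen", popen):
        assert read_version() == "11.2"
    popen.assert_called_once_with("pg_config --version")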
ed985791d20199af9cb34e445d0a96dc11e9129b
|
climlab/tests/test_rrtm.py
|
climlab/tests/test_rrtm.py
|
from __future__ import division
import numpy as np
import climlab
import pytest
from climlab.radiation import RRTMG, RRTMG_LW, RRTMG_SW, CAM3Radiation_LW
def test_rrtm_creation():
# initial state (temperatures)
state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
# Create a RRTM radiation model
rad = RRTMG(state=state)
rad.step_forward()
assert type(rad.subprocess['LW']) is RRTMG_LW
assert type(rad.subprocess['SW']) is RRTMG_SW
assert hasattr(rad, 'OLR')
assert hasattr(rad, 'OLRclr')
assert hasattr(rad, 'ASR')
assert hasattr(rad, 'ASRclr')
def test_swap_component():
# initial state (temperatures)
state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
# Create a RRTM radiation model
rad = RRTM(state=state)
rad.step_forward()
# Swap out the longwave model for CAM3
rad.remove_subprocess('LW')
rad.step_forward()
rad.add_subprocess('LW', CAM3Radiation_LW(state=state))
rad.step_forward()
assert hasattr(rad, 'OLR')
|
Add some basic tests for RRTM scheme.
|
Add some basic tests for RRTM scheme.
|
Python
|
mit
|
brian-rose/climlab,cjcardinale/climlab,brian-rose/climlab,cjcardinale/climlab,cjcardinale/climlab
|
Add some basic tests for RRTM scheme.
|
from __future__ import division
import numpy as np
import climlab
import pytest
from climlab.radiation import RRTMG, RRTMG_LW, RRTMG_SW, CAM3Radiation_LW
def test_rrtm_creation():
# initial state (temperatures)
state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
# Create a RRTM radiation model
rad = RRTMG(state=state)
rad.step_forward()
assert type(rad.subprocess['LW']) is RRTMG_LW
assert type(rad.subprocess['SW']) is RRTMG_SW
assert hasattr(rad, 'OLR')
assert hasattr(rad, 'OLRclr')
assert hasattr(rad, 'ASR')
assert hasattr(rad, 'ASRclr')
def test_swap_component():
# initial state (temperatures)
state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
# Create a RRTM radiation model
rad = RRTM(state=state)
rad.step_forward()
# Swap out the longwave model for CAM3
rad.remove_subprocess('LW')
rad.step_forward()
rad.add_subprocess('LW', CAM3Radiation_LW(state=state))
rad.step_forward()
assert hasattr(rad, 'OLR')
|
<commit_before><commit_msg>Add some basic tests for RRTM scheme.<commit_after>
|
from __future__ import division
import numpy as np
import climlab
import pytest
from climlab.radiation import RRTMG, RRTMG_LW, RRTMG_SW, CAM3Radiation_LW
def test_rrtm_creation():
# initial state (temperatures)
state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
# Create a RRTM radiation model
rad = RRTMG(state=state)
rad.step_forward()
assert type(rad.subprocess['LW']) is RRTMG_LW
assert type(rad.subprocess['SW']) is RRTMG_SW
assert hasattr(rad, 'OLR')
assert hasattr(rad, 'OLRclr')
assert hasattr(rad, 'ASR')
assert hasattr(rad, 'ASRclr')
def test_swap_component():
# initial state (temperatures)
state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
# Create a RRTM radiation model
rad = RRTM(state=state)
rad.step_forward()
# Swap out the longwave model for CAM3
rad.remove_subprocess('LW')
rad.step_forward()
rad.add_subprocess('LW', CAM3Radiation_LW(state=state))
rad.step_forward()
assert hasattr(rad, 'OLR')
|
Add some basic tests for RRTM scheme.from __future__ import division
import numpy as np
import climlab
import pytest
from climlab.radiation import RRTMG, RRTMG_LW, RRTMG_SW, CAM3Radiation_LW
def test_rrtm_creation():
# initial state (temperatures)
state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
# Create a RRTM radiation model
rad = RRTMG(state=state)
rad.step_forward()
assert type(rad.subprocess['LW']) is RRTMG_LW
assert type(rad.subprocess['SW']) is RRTMG_SW
assert hasattr(rad, 'OLR')
assert hasattr(rad, 'OLRclr')
assert hasattr(rad, 'ASR')
assert hasattr(rad, 'ASRclr')
def test_swap_component():
# initial state (temperatures)
state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
# Create a RRTM radiation model
rad = RRTM(state=state)
rad.step_forward()
# Swap out the longwave model for CAM3
rad.remove_subprocess('LW')
rad.step_forward()
rad.add_subprocess('LW', CAM3Radiation_LW(state=state))
rad.step_forward()
assert hasattr(rad, 'OLR')
|
<commit_before><commit_msg>Add some basic tests for RRTM scheme.<commit_after>from __future__ import division
import numpy as np
import climlab
import pytest
from climlab.radiation import RRTMG, RRTMG_LW, RRTMG_SW, CAM3Radiation_LW
def test_rrtm_creation():
# initial state (temperatures)
state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
# Create a RRTM radiation model
rad = RRTMG(state=state)
rad.step_forward()
assert type(rad.subprocess['LW']) is RRTMG_LW
assert type(rad.subprocess['SW']) is RRTMG_SW
assert hasattr(rad, 'OLR')
assert hasattr(rad, 'OLRclr')
assert hasattr(rad, 'ASR')
assert hasattr(rad, 'ASRclr')
def test_swap_component():
# initial state (temperatures)
state = climlab.column_state(num_lev=num_lev, num_lat=1, water_depth=5.)
# Create a RRTM radiation model
rad = RRTM(state=state)
rad.step_forward()
# Swap out the longwave model for CAM3
rad.remove_subprocess('LW')
rad.step_forward()
rad.add_subprocess('LW', CAM3Radiation_LW(state=state))
rad.step_forward()
assert hasattr(rad, 'OLR')
|
|
a84d8005193328b63eaf98f0852dc72c3e58aed9
|
twitter_feed.py
|
twitter_feed.py
|
# authenticates with twitter, searches for microsoft, evaluates overall
# sentiment for microsoft
import numpy as np
import twitter
from textblob import TextBlob
f = open('me.auth')
keys = f.readlines()
# Read in keys
keys = [x.strip('\n') for x in keys]
# Connect
api = twitter.Api(consumer_key = keys[0],
consumer_secret = keys[1],
access_token_key = keys[2],
access_token_secret = keys[3])
print 'logged in as ', api.VerifyCredentials().name
search = api.GetSearch(term='microsoft', )
# Make text blobs out of status content
blobs = [ TextBlob(status.text) for status in search ]
sentiments = [ blob.sentiment.polarity for blob in blobs ]
filtered_sentiments = filter(lambda a: a!=0.0, sentiments)
overall_sentiment = sum(filtered_sentiments)/len(filtered_sentiments)
print 'Overall sentiment for microsoft: {0}'.format(overall_sentiment)
|
Add example script for evaluating sentiment
|
Add example script for evaluating sentiment
|
Python
|
mit
|
dankolbman/MarketCents
|
Add example script for evaluating sentiment
|
# authenticates with twitter, searches for microsoft, evaluates overall
# sentiment for microsoft
import numpy as np
import twitter
from textblob import TextBlob
f = open('me.auth')
keys = f.readlines()
# Read in keys
keys = [x.strip('\n') for x in keys]
# Connect
api = twitter.Api(consumer_key = keys[0],
consumer_secret = keys[1],
access_token_key = keys[2],
access_token_secret = keys[3])
print 'logged in as ', api.VerifyCredentials().name
search = api.GetSearch(term='microsoft', )
# Make text blobs out of status content
blobs = [ TextBlob(status.text) for status in search ]
sentiments = [ blob.sentiment.polarity for blob in blobs ]
filtered_sentiments = filter(lambda a: a!=0.0, sentiments)
overall_sentiment = sum(filtered_sentiments)/len(filtered_sentiments)
print 'Overall sentiment for microsoft: {0}'.format(overall_sentiment)
|
<commit_before><commit_msg>Add example script for evaluating sentiment<commit_after>
|
# authenticates with twitter, searches for microsoft, evaluates overall
# sentiment for microsoft
import numpy as np
import twitter
from textblob import TextBlob
f = open('me.auth')
keys = f.readlines()
# Read in keys
keys = [x.strip('\n') for x in keys]
# Connect
api = twitter.Api(consumer_key = keys[0],
consumer_secret = keys[1],
access_token_key = keys[2],
access_token_secret = keys[3])
print 'logged in as ', api.VerifyCredentials().name
search = api.GetSearch(term='microsoft', )
# Make text blobs out of status content
blobs = [ TextBlob(status.text) for status in search ]
sentiments = [ blob.sentiment.polarity for blob in blobs ]
filtered_sentiments = filter(lambda a: a!=0.0, sentiments)
overall_sentiment = sum(filtered_sentiments)/len(filtered_sentiments)
print 'Overall sentiment for microsoft: {0}'.format(overall_sentiment)
|
Add example script for evaluating setiment# authenticates with twitter, searches for microsoft, evaluates overall
# sentiment for microsoft
import numpy as np
import twitter
from textblob import TextBlob
f = open('me.auth')
keys = f.readlines()
# Read in keys
keys = [x.strip('\n') for x in keys]
# Connect
api = twitter.Api(consumer_key = keys[0],
consumer_secret = keys[1],
access_token_key = keys[2],
access_token_secret = keys[3])
print 'logged in as ', api.VerifyCredentials().name
search = api.GetSearch(term='microsoft', )
# Make text blobs out of status content
blobs = [ TextBlob(status.text) for status in search ]
sentiments = [ blob.sentiment.polarity for blob in blobs ]
filtered_sentiments = filter(lambda a: a!=0.0, sentiments)
overall_sentiment = sum(filtered_sentiments)/len(filtered_sentiments)
print 'Overall sentiment for microsoft: {0}'.format(overall_sentiment)
|
<commit_before><commit_msg>Add example script for evaluating setiment<commit_after># authenticates with twitter, searches for microsoft, evaluates overall
# sentiment for microsoft
import numpy as np
import twitter
from textblob import TextBlob
f = open('me.auth')
keys = f.readlines()
# Read in keys
keys = [x.strip('\n') for x in keys]
# Connect
api = twitter.Api(consumer_key = keys[0],
consumer_secret = keys[1],
access_token_key = keys[2],
access_token_secret = keys[3])
print 'logged in as ', api.VerifyCredentials().name
search = api.GetSearch(term='microsoft', )
# Make text blobs out of status content
blobs = [ TextBlob(status.text) for status in search ]
sentiments = [ blob.sentiment.polarity for blob in blobs ]
filtered_sentiments = filter(lambda a: a!=0.0, sentiments)
overall_sentiment = sum(filtered_sentiments)/len(filtered_sentiments)
print 'Overall sentiment for microsoft: {0}'.format(overall_sentiment)
|
|
0d7b9e23889b2908e874bda58a119af6b763f04e
|
test_add_group.py
|
test_add_group.py
|
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("Test")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("test")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("test")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
Test Case for adding groups
|
Test Case for adding groups
|
Python
|
apache-2.0
|
labizon/Python_training
|
Test Case for adding groups
|
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("Test")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("test")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("test")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test Case for adding groups<commit_after>
|
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("Test")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("test")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("test")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
Test Case for adding groups# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("Test")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("test")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("test")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test Case for adding groups<commit_after># -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("Test")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("test")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("test")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
|
469c2932575daaf42d8cec5578c087f4e5c340af
|
helusers/jwt.py
|
helusers/jwt.py
|
from django.utils.translation import ugettext as _
from django.contrib.auth import get_user_model
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework import exceptions
import random
User = get_user_model()
class JWTAuthentication(JSONWebTokenAuthentication):
def populate_user(self, user, data):
exclude_fields = ['is_staff', 'password', 'is_superuser', 'id']
user_fields = [f.name for f in user._meta.fields if f not in exclude_fields]
changed = False
for field in user_fields:
if field in data:
val = data[field]
if getattr(user, field) != val:
setattr(user, field, val)
changed = True
# Make sure there are no duplicate usernames
tries = 0
while User.objects.filter(username=user.username).exclude(uuid=user.uuid).exists():
user.username = "%s-%d" % (user.username, tries + 1)
changed = True
return changed
def authenticate_credentials(self, payload):
user_id = payload.get('sub')
if not user_id:
msg = _('Invalid payload.')
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get(uuid=user_id)
except User.DoesNotExist:
user = User(uuid=user_id)
user.set_unusable_password()
changed = self.populate_user(user, payload)
if changed:
user.save()
return super(JWTAuthentication, self).authenticate_credentials(payload)
def get_user_id_from_payload_handler(payload):
return payload.get('sub')
|
Add Django REST Framework authentication helpers for JWT
|
Add Django REST Framework authentication helpers for JWT
|
Python
|
bsd-2-clause
|
City-of-Helsinki/django-helusers,City-of-Helsinki/django-helusers
|
Add Django REST Framework authentication helpers for JWT
|
from django.utils.translation import ugettext as _
from django.contrib.auth import get_user_model
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework import exceptions
import random
User = get_user_model()
class JWTAuthentication(JSONWebTokenAuthentication):
def populate_user(self, user, data):
exclude_fields = ['is_staff', 'password', 'is_superuser', 'id']
user_fields = [f.name for f in user._meta.fields if f not in exclude_fields]
changed = False
for field in user_fields:
if field in data:
val = data[field]
if getattr(user, field) != val:
setattr(user, field, val)
changed = True
# Make sure there are no duplicate usernames
tries = 0
while User.objects.filter(username=user.username).exclude(uuid=user.uuid).exists():
user.username = "%s-%d" % (user.username, tries + 1)
changed = True
return changed
def authenticate_credentials(self, payload):
user_id = payload.get('sub')
if not user_id:
msg = _('Invalid payload.')
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get(uuid=user_id)
except User.DoesNotExist:
user = User(uuid=user_id)
user.set_unusable_password()
changed = self.populate_user(user, payload)
if changed:
user.save()
return super(JWTAuthentication, self).authenticate_credentials(payload)
def get_user_id_from_payload_handler(payload):
return payload.get('sub')
|
<commit_before><commit_msg>Add Django REST Framework authentication helpers for JWT<commit_after>
|
from django.utils.translation import ugettext as _
from django.contrib.auth import get_user_model
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework import exceptions
import random
User = get_user_model()
class JWTAuthentication(JSONWebTokenAuthentication):
def populate_user(self, user, data):
exclude_fields = ['is_staff', 'password', 'is_superuser', 'id']
user_fields = [f.name for f in user._meta.fields if f not in exclude_fields]
changed = False
for field in user_fields:
if field in data:
val = data[field]
if getattr(user, field) != val:
setattr(user, field, val)
changed = True
# Make sure there are no duplicate usernames
tries = 0
while User.objects.filter(username=user.username).exclude(uuid=user.uuid).exists():
user.username = "%s-%d" % (user.username, tries + 1)
changed = True
return changed
def authenticate_credentials(self, payload):
user_id = payload.get('sub')
if not user_id:
msg = _('Invalid payload.')
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get(uuid=user_id)
except User.DoesNotExist:
user = User(uuid=user_id)
user.set_unusable_password()
changed = self.populate_user(user, payload)
if changed:
user.save()
return super(JWTAuthentication, self).authenticate_credentials(payload)
def get_user_id_from_payload_handler(payload):
return payload.get('sub')
|
Add Django REST Framework authentication helpers for JWTfrom django.utils.translation import ugettext as _
from django.contrib.auth import get_user_model
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework import exceptions
import random
User = get_user_model()
class JWTAuthentication(JSONWebTokenAuthentication):
def populate_user(self, user, data):
exclude_fields = ['is_staff', 'password', 'is_superuser', 'id']
user_fields = [f.name for f in user._meta.fields if f not in exclude_fields]
changed = False
for field in user_fields:
if field in data:
val = data[field]
if getattr(user, field) != val:
setattr(user, field, val)
changed = True
# Make sure there are no duplicate usernames
tries = 0
while User.objects.filter(username=user.username).exclude(uuid=user.uuid).exists():
user.username = "%s-%d" % (user.username, tries + 1)
changed = True
return changed
def authenticate_credentials(self, payload):
user_id = payload.get('sub')
if not user_id:
msg = _('Invalid payload.')
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get(uuid=user_id)
except User.DoesNotExist:
user = User(uuid=user_id)
user.set_unusable_password()
changed = self.populate_user(user, payload)
if changed:
user.save()
return super(JWTAuthentication, self).authenticate_credentials(payload)
def get_user_id_from_payload_handler(payload):
return payload.get('sub')
|
<commit_before><commit_msg>Add Django REST Framework authentication helpers for JWT<commit_after>from django.utils.translation import ugettext as _
from django.contrib.auth import get_user_model
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework import exceptions
import random
User = get_user_model()
class JWTAuthentication(JSONWebTokenAuthentication):
def populate_user(self, user, data):
exclude_fields = ['is_staff', 'password', 'is_superuser', 'id']
user_fields = [f.name for f in user._meta.fields if f not in exclude_fields]
changed = False
for field in user_fields:
if field in data:
val = data[field]
if getattr(user, field) != val:
setattr(user, field, val)
changed = True
# Make sure there are no duplicate usernames
tries = 0
while User.objects.filter(username=user.username).exclude(uuid=user.uuid).exists():
user.username = "%s-%d" % (user.username, tries + 1)
changed = True
return changed
def authenticate_credentials(self, payload):
user_id = payload.get('sub')
if not user_id:
msg = _('Invalid payload.')
raise exceptions.AuthenticationFailed(msg)
try:
user = User.objects.get(uuid=user_id)
except User.DoesNotExist:
user = User(uuid=user_id)
user.set_unusable_password()
changed = self.populate_user(user, payload)
if changed:
user.save()
return super(JWTAuthentication, self).authenticate_credentials(payload)
def get_user_id_from_payload_handler(payload):
return payload.get('sub')
|
|
5c3eaede26381babac281dfa0d9bec3ebe911ba8
|
util/compile.py
|
util/compile.py
|
#!/usr/bin/env python
import sys, os, subprocess
sys.argv.pop(0)
languages = sys.argv
languages.sort()
js_path = os.path.dirname(__file__) + '/../js/'
js_files_to_include = [js_path + 'rainbow.js']
included_languages = []
for language in languages:
path = js_path + 'language/' + language + '.js'
if not os.path.isfile(path):
print "no file for language: ",language
continue
included_languages.append(language)
js_files_to_include.append(path)
print 'waiting for closure compiler...'
proc = subprocess.Popen(['java', '-jar', '/usr/local/compiler-latest/compiler.jar', '--compilation_level', 'ADVANCED_OPTIMIZATIONS'] + js_files_to_include, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
output, err = proc.communicate()
file_name = 'rainbow' + ('+' + '+'.join(included_languages) if len(included_languages) else '') + '.min.js'
print 'writing to file:',file_name
new_file = js_path + file_name
file = open(new_file, "w")
file.write(output)
file.close()
|
Add tool for building custom packages
|
Add tool for building custom packages
|
Python
|
apache-2.0
|
HotelsDotCom/rainbow,linuxl0ver/rainbow,linuxl0ver/rainbow,metasyn/rainbow,ccampbell/rainbow,HotelsDotCom/rainbow,cybrox/rainbow,jeremykenedy/rainbow,cybrox/rainbow,segmentio/rainbow,jeremykenedy/rainbow,ptigas/rainbow,greyhwndz/rainbow,linuxl0ver/rainbow,jeremykenedy/rainbow,javipepe/rainbow,greyhwndz/rainbow,metasyn/rainbow,greyhwndz/rainbow,javipepe/rainbow,javipepe/rainbow,segmentio/rainbow,ccampbell/rainbow,ptigas/rainbow,HotelsDotCom/rainbow,metasyn/rainbow,cybrox/rainbow
|
Add tool for building custom packages
|
#!/usr/bin/env python
import sys, os, subprocess
sys.argv.pop(0)
languages = sys.argv
languages.sort()
js_path = os.path.dirname(__file__) + '/../js/'
js_files_to_include = [js_path + 'rainbow.js']
included_languages = []
for language in languages:
path = js_path + 'language/' + language + '.js'
if not os.path.isfile(path):
print "no file for language: ",language
continue
included_languages.append(language)
js_files_to_include.append(path)
print 'waiting for closure compiler...'
proc = subprocess.Popen(['java', '-jar', '/usr/local/compiler-latest/compiler.jar', '--compilation_level', 'ADVANCED_OPTIMIZATIONS'] + js_files_to_include, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
output, err = proc.communicate()
file_name = 'rainbow' + ('+' + '+'.join(included_languages) if len(included_languages) else '') + '.min.js'
print 'writing to file:',file_name
new_file = js_path + file_name
file = open(new_file, "w")
file.write(output)
file.close()
|
<commit_before><commit_msg>Add tool for building custom packages<commit_after>
|
#!/usr/bin/env python
import sys, os, subprocess
sys.argv.pop(0)
languages = sys.argv
languages.sort()
js_path = os.path.dirname(__file__) + '/../js/'
js_files_to_include = [js_path + 'rainbow.js']
included_languages = []
for language in languages:
path = js_path + 'language/' + language + '.js'
if not os.path.isfile(path):
print "no file for language: ",language
continue
included_languages.append(language)
js_files_to_include.append(path)
print 'waiting for closure compiler...'
proc = subprocess.Popen(['java', '-jar', '/usr/local/compiler-latest/compiler.jar', '--compilation_level', 'ADVANCED_OPTIMIZATIONS'] + js_files_to_include, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
output, err = proc.communicate()
file_name = 'rainbow' + ('+' + '+'.join(included_languages) if len(included_languages) else '') + '.min.js'
print 'writing to file:',file_name
new_file = js_path + file_name
file = open(new_file, "w")
file.write(output)
file.close()
|
Add tool for building custom packages#!/usr/bin/env python
import sys, os, subprocess
sys.argv.pop(0)
languages = sys.argv
languages.sort()
js_path = os.path.dirname(__file__) + '/../js/'
js_files_to_include = [js_path + 'rainbow.js']
included_languages = []
for language in languages:
path = js_path + 'language/' + language + '.js'
if not os.path.isfile(path):
print "no file for language: ",language
continue
included_languages.append(language)
js_files_to_include.append(path)
print 'waiting for closure compiler...'
proc = subprocess.Popen(['java', '-jar', '/usr/local/compiler-latest/compiler.jar', '--compilation_level', 'ADVANCED_OPTIMIZATIONS'] + js_files_to_include, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
output, err = proc.communicate()
file_name = 'rainbow' + ('+' + '+'.join(included_languages) if len(included_languages) else '') + '.min.js'
print 'writing to file:',file_name
new_file = js_path + file_name
file = open(new_file, "w")
file.write(output)
file.close()
|
<commit_before><commit_msg>Add tool for building custom packages<commit_after>#!/usr/bin/env python
import sys, os, subprocess
sys.argv.pop(0)
languages = sys.argv
languages.sort()
js_path = os.path.dirname(__file__) + '/../js/'
js_files_to_include = [js_path + 'rainbow.js']
included_languages = []
for language in languages:
path = js_path + 'language/' + language + '.js'
if not os.path.isfile(path):
print "no file for language: ",language
continue
included_languages.append(language)
js_files_to_include.append(path)
print 'waiting for closure compiler...'
proc = subprocess.Popen(['java', '-jar', '/usr/local/compiler-latest/compiler.jar', '--compilation_level', 'ADVANCED_OPTIMIZATIONS'] + js_files_to_include, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
output, err = proc.communicate()
file_name = 'rainbow' + ('+' + '+'.join(included_languages) if len(included_languages) else '') + '.min.js'
print 'writing to file:',file_name
new_file = js_path + file_name
file = open(new_file, "w")
file.write(output)
file.close()
|
|
f35d5251500268235598e4dbd2ddf91c994e1632
|
ixxy_admin_utils/custom_widgets.py
|
ixxy_admin_utils/custom_widgets.py
|
from dal_select2_tagging.widgets import TaggingSelect2
from django import VERSION
from tagging.utils import parse_tag_input
class IxxyTaggingSelect2(TaggingSelect2):
# TaggingSelect2 doesn't handle spaces as delimeters in tags
# Django-tagging has a function we can use that just works
def render_options(self, *args):
"""Render only selected tags."""
selected_choices_arg = 1 if VERSION < (1, 10) else 0
selected_choices = args[selected_choices_arg]
if selected_choices:
selected_choices = parse_tag_input(selected_choices)
options = [
'<option value="%s" selected="selected">%s</option>' % (c, c)
for c in selected_choices
]
return '\n'.join(options)
|
Fix for django-tagging and automcomplete
|
Fix for django-tagging and automcomplete
|
Python
|
mit
|
DjangoAdminHackers/ixxy-admin-utils,DjangoAdminHackers/ixxy-admin-utils
|
Fix for django-tagging and automcomplete
|
from dal_select2_tagging.widgets import TaggingSelect2
from django import VERSION
from tagging.utils import parse_tag_input
class IxxyTaggingSelect2(TaggingSelect2):
# TaggingSelect2 doesn't handle spaces as delimeters in tags
# Django-tagging has a function we can use that just works
def render_options(self, *args):
"""Render only selected tags."""
selected_choices_arg = 1 if VERSION < (1, 10) else 0
selected_choices = args[selected_choices_arg]
if selected_choices:
selected_choices = parse_tag_input(selected_choices)
options = [
'<option value="%s" selected="selected">%s</option>' % (c, c)
for c in selected_choices
]
return '\n'.join(options)
|
<commit_before><commit_msg>Fix for django-tagging and automcomplete<commit_after>
|
from dal_select2_tagging.widgets import TaggingSelect2
from django import VERSION
from tagging.utils import parse_tag_input
class IxxyTaggingSelect2(TaggingSelect2):
# TaggingSelect2 doesn't handle spaces as delimeters in tags
# Django-tagging has a function we can use that just works
def render_options(self, *args):
"""Render only selected tags."""
selected_choices_arg = 1 if VERSION < (1, 10) else 0
selected_choices = args[selected_choices_arg]
if selected_choices:
selected_choices = parse_tag_input(selected_choices)
options = [
'<option value="%s" selected="selected">%s</option>' % (c, c)
for c in selected_choices
]
return '\n'.join(options)
|
Fix for django-tagging and automcompletefrom dal_select2_tagging.widgets import TaggingSelect2
from django import VERSION
from tagging.utils import parse_tag_input
class IxxyTaggingSelect2(TaggingSelect2):
# TaggingSelect2 doesn't handle spaces as delimeters in tags
# Django-tagging has a function we can use that just works
def render_options(self, *args):
"""Render only selected tags."""
selected_choices_arg = 1 if VERSION < (1, 10) else 0
selected_choices = args[selected_choices_arg]
if selected_choices:
selected_choices = parse_tag_input(selected_choices)
options = [
'<option value="%s" selected="selected">%s</option>' % (c, c)
for c in selected_choices
]
return '\n'.join(options)
|
<commit_before><commit_msg>Fix for django-tagging and automcomplete<commit_after>from dal_select2_tagging.widgets import TaggingSelect2
from django import VERSION
from tagging.utils import parse_tag_input
class IxxyTaggingSelect2(TaggingSelect2):
# TaggingSelect2 doesn't handle spaces as delimeters in tags
# Django-tagging has a function we can use that just works
def render_options(self, *args):
"""Render only selected tags."""
selected_choices_arg = 1 if VERSION < (1, 10) else 0
selected_choices = args[selected_choices_arg]
if selected_choices:
selected_choices = parse_tag_input(selected_choices)
options = [
'<option value="%s" selected="selected">%s</option>' % (c, c)
for c in selected_choices
]
return '\n'.join(options)
|
|
47ae456b3a4252d7c838219e3ebd15e049316891
|
profile_collection/startup/25-shutter.py
|
profile_collection/startup/25-shutter.py
|
from __future__ import print_function
import epics
import logging
from ophyd.controls import EpicsSignal
from ophyd.controls.signal import SignalGroup
class Shutter(SignalGroup):
def __init__(self, open=None, open_status=None,
close=None, close_status=None):
super(Shutter, self).__init__()
signals = [EpicsSignal(open_status, write_pv=open, alias='_open'),
EpicsSignal(close_status, write_pv=close, alias='_close'),
]
for sig in signals:
self.add_signal(sig)
def open(self):
self._open.value = 1
def close(self):
self._close.value = 1
foe_sh = Shutter(open='XF:11ID-PPS{Sh:FE}Cmd:Opn-Cmd',
open_status='XF:11ID-PPS{Sh:FE}Cmd:Opn-Sts',
close='XF:11ID-PPS{Sh:FE}Cmd:Cls-Cmd',
close_status='XF:11ID-PPS{Sh:FE}Cmd:Cls-Sts')
fe_sh = Shutter(open='XF:11IDA-PPS{PSh}Cmd:Opn-Cmd',
open_status='XF:11IDA-PPS{PSh}Cmd:Opn-Sts',
close='XF:11IDA-PPS{PSh}Cmd:Cls-Cmd',
close_status='XF:11IDA-PPS{PSh}Cmd:Cls-Sts')
class FastShutter(EpicsSignal):
def open(self):
self.put(1)
def close(self):
self.put(0)
fast_sh = FastShutter('XF:11IDB-ES{Zebra}:SOFT_IN:B0',
rw=True, name='fast_sh')
|
Add three shutters and open/close functions
|
Add three shutters and open/close functions
|
Python
|
bsd-2-clause
|
NSLS-II-CHX/ipython_ophyd,NSLS-II-CHX/ipython_ophyd
|
Add three shutters and open/close functions
|
from __future__ import print_function
import epics
import logging
from ophyd.controls import EpicsSignal
from ophyd.controls.signal import SignalGroup
class Shutter(SignalGroup):
def __init__(self, open=None, open_status=None,
close=None, close_status=None):
super(Shutter, self).__init__()
signals = [EpicsSignal(open_status, write_pv=open, alias='_open'),
EpicsSignal(close_status, write_pv=close, alias='_close'),
]
for sig in signals:
self.add_signal(sig)
def open(self):
self._open.value = 1
def close(self):
self._close.value = 1
foe_sh = Shutter(open='XF:11ID-PPS{Sh:FE}Cmd:Opn-Cmd',
open_status='XF:11ID-PPS{Sh:FE}Cmd:Opn-Sts',
close='XF:11ID-PPS{Sh:FE}Cmd:Cls-Cmd',
close_status='XF:11ID-PPS{Sh:FE}Cmd:Cls-Sts')
fe_sh = Shutter(open='XF:11IDA-PPS{PSh}Cmd:Opn-Cmd',
open_status='XF:11IDA-PPS{PSh}Cmd:Opn-Sts',
close='XF:11IDA-PPS{PSh}Cmd:Cls-Cmd',
close_status='XF:11IDA-PPS{PSh}Cmd:Cls-Sts')
class FastShutter(EpicsSignal):
def open(self):
self.put(1)
def close(self):
self.put(0)
fast_sh = FastShutter('XF:11IDB-ES{Zebra}:SOFT_IN:B0',
rw=True, name='fast_sh')
|
<commit_before><commit_msg>Add three shutters and open/close functions<commit_after>
|
from __future__ import print_function
import epics
import logging
from ophyd.controls import EpicsSignal
from ophyd.controls.signal import SignalGroup
class Shutter(SignalGroup):
def __init__(self, open=None, open_status=None,
close=None, close_status=None):
super(Shutter, self).__init__()
signals = [EpicsSignal(open_status, write_pv=open, alias='_open'),
EpicsSignal(close_status, write_pv=close, alias='_close'),
]
for sig in signals:
self.add_signal(sig)
def open(self):
self._open.value = 1
def close(self):
self._close.value = 1
foe_sh = Shutter(open='XF:11ID-PPS{Sh:FE}Cmd:Opn-Cmd',
open_status='XF:11ID-PPS{Sh:FE}Cmd:Opn-Sts',
close='XF:11ID-PPS{Sh:FE}Cmd:Cls-Cmd',
close_status='XF:11ID-PPS{Sh:FE}Cmd:Cls-Sts')
fe_sh = Shutter(open='XF:11IDA-PPS{PSh}Cmd:Opn-Cmd',
open_status='XF:11IDA-PPS{PSh}Cmd:Opn-Sts',
close='XF:11IDA-PPS{PSh}Cmd:Cls-Cmd',
close_status='XF:11IDA-PPS{PSh}Cmd:Cls-Sts')
class FastShutter(EpicsSignal):
def open(self):
self.put(1)
def close(self):
self.put(0)
fast_sh = FastShutter('XF:11IDB-ES{Zebra}:SOFT_IN:B0',
rw=True, name='fast_sh')
|
Add three shutters and open/close functionsfrom __future__ import print_function
import epics
import logging
from ophyd.controls import EpicsSignal
from ophyd.controls.signal import SignalGroup
class Shutter(SignalGroup):
def __init__(self, open=None, open_status=None,
close=None, close_status=None):
super(Shutter, self).__init__()
signals = [EpicsSignal(open_status, write_pv=open, alias='_open'),
EpicsSignal(close_status, write_pv=close, alias='_close'),
]
for sig in signals:
self.add_signal(sig)
def open(self):
self._open.value = 1
def close(self):
self._close.value = 1
foe_sh = Shutter(open='XF:11ID-PPS{Sh:FE}Cmd:Opn-Cmd',
open_status='XF:11ID-PPS{Sh:FE}Cmd:Opn-Sts',
close='XF:11ID-PPS{Sh:FE}Cmd:Cls-Cmd',
close_status='XF:11ID-PPS{Sh:FE}Cmd:Cls-Sts')
fe_sh = Shutter(open='XF:11IDA-PPS{PSh}Cmd:Opn-Cmd',
open_status='XF:11IDA-PPS{PSh}Cmd:Opn-Sts',
close='XF:11IDA-PPS{PSh}Cmd:Cls-Cmd',
close_status='XF:11IDA-PPS{PSh}Cmd:Cls-Sts')
class FastShutter(EpicsSignal):
def open(self):
self.put(1)
def close(self):
self.put(0)
fast_sh = FastShutter('XF:11IDB-ES{Zebra}:SOFT_IN:B0',
rw=True, name='fast_sh')
|
<commit_before><commit_msg>Add three shutters and open/close functions<commit_after>from __future__ import print_function
import epics
import logging
from ophyd.controls import EpicsSignal
from ophyd.controls.signal import SignalGroup
class Shutter(SignalGroup):
def __init__(self, open=None, open_status=None,
close=None, close_status=None):
super(Shutter, self).__init__()
signals = [EpicsSignal(open_status, write_pv=open, alias='_open'),
EpicsSignal(close_status, write_pv=close, alias='_close'),
]
for sig in signals:
self.add_signal(sig)
def open(self):
self._open.value = 1
def close(self):
self._close.value = 1
foe_sh = Shutter(open='XF:11ID-PPS{Sh:FE}Cmd:Opn-Cmd',
open_status='XF:11ID-PPS{Sh:FE}Cmd:Opn-Sts',
close='XF:11ID-PPS{Sh:FE}Cmd:Cls-Cmd',
close_status='XF:11ID-PPS{Sh:FE}Cmd:Cls-Sts')
fe_sh = Shutter(open='XF:11IDA-PPS{PSh}Cmd:Opn-Cmd',
open_status='XF:11IDA-PPS{PSh}Cmd:Opn-Sts',
close='XF:11IDA-PPS{PSh}Cmd:Cls-Cmd',
close_status='XF:11IDA-PPS{PSh}Cmd:Cls-Sts')
class FastShutter(EpicsSignal):
def open(self):
self.put(1)
def close(self):
self.put(0)
fast_sh = FastShutter('XF:11IDB-ES{Zebra}:SOFT_IN:B0',
rw=True, name='fast_sh')
|
|
1d021cd7b52ecc4d9684dc607a1ff9f0b8181b37
|
read_receiver.py
|
read_receiver.py
|
#! /usr/bin/env python
import numpy
from matplotlib import pyplot as plt
import matplotlib.colors as colors
import os
import sys
import seaborn
def ParseVariableBinaryHeader(header):
header_detect = '#'
header = header.strip("\n").split()
assert(header[0] == header_detect)
name = header[1]
dtype = header[2]
nb_components = int(header[3])
nx = int(header[4])
ny = int(header[5])
nz = int(header[6])
return name, dtype, nb_components, nx, ny, nz
receiver_filename = "output/receivers.dat"
receiver_file = open(receiver_filename, 'r')
readlines = receiver_file.read().split("\n")
receiver_file.close()
temp_filename = "tmp.binary"
tempfile = open(temp_filename, 'wb')
# Parse header
header = readlines[0]
name, dtype, nb_components, nx, ny, nz = ParseVariableBinaryHeader(header)
# Write data without header
for line in readlines[1:]:
tempfile.write(line + "\n")
tempfile.close()
tempfile = open(temp_filename, 'rb')
data = numpy.fromfile(tempfile, dtype = 'float_')
tempfile.close()
if os.path.exists(temp_filename):
print "Removing temporary file " + str(temp_filename)
os.remove(temp_filename)
print data.shape, nx, nz
data = data.reshape(nz, nx)
amplitude_max = max(numpy.amax(data), - numpy.amin(data))
print "amplitude_max=", amplitude_max
rcv_ids = {'rcv1': (nx / 2, 'blue'),
'rcv2': (nx / 4, 'red'),
'rcv3': (3 * nx / 5, 'green'),
# 'rcv4': (1200, 'orange'),
# 'rcv5': (800, 'purple'),
}
plt.figure()
with seaborn.axes_style("dark"):
cmap = 'gray'
plt.imshow(data, cmap = cmap, interpolation = 'none', aspect = 'auto', vmin = - 0.1 * amplitude_max, vmax = 0.1 * amplitude_max)
for key, value in rcv_ids.iteritems():
rcv_id, color = value
plt.plot([rcv_id, rcv_id], [0.0, nz], color = color, linewidth = 2)
plt.xlim([0,nx])
plt.ylim([nz,0])
plt.figure()
cnt = 1
for key, value in rcv_ids.iteritems():
rcv_id, color = value
offset = numpy.power(-1.0, cnt) * (2.0 * amplitude_max) * (cnt / 2)
print offset
plt.plot(offset + data[:, rcv_id], color = color, linewidth = 2, label = key)
cnt += 1
plt.legend()
plt.show()
|
Add python script to read receiver file
|
Add python script to read receiver file
|
Python
|
apache-2.0
|
RaphaelPoncet/2016-macs2-projet-hpc
|
Add python script to read receiver file
|
#! /usr/bin/env python
import numpy
from matplotlib import pyplot as plt
import matplotlib.colors as colors
import os
import sys
import seaborn
def ParseVariableBinaryHeader(header):
header_detect = '#'
header = header.strip("\n").split()
assert(header[0] == header_detect)
name = header[1]
dtype = header[2]
nb_components = int(header[3])
nx = int(header[4])
ny = int(header[5])
nz = int(header[6])
return name, dtype, nb_components, nx, ny, nz
receiver_filename = "output/receivers.dat"
receiver_file = open(receiver_filename, 'r')
readlines = receiver_file.read().split("\n")
receiver_file.close()
temp_filename = "tmp.binary"
tempfile = open(temp_filename, 'wb')
# Parse header
header = readlines[0]
name, dtype, nb_components, nx, ny, nz = ParseVariableBinaryHeader(header)
# Write data without header
for line in readlines[1:]:
tempfile.write(line + "\n")
tempfile.close()
tempfile = open(temp_filename, 'rb')
data = numpy.fromfile(tempfile, dtype = 'float_')
tempfile.close()
if os.path.exists(temp_filename):
print "Removing temporary file " + str(temp_filename)
os.remove(temp_filename)
print data.shape, nx, nz
data = data.reshape(nz, nx)
amplitude_max = max(numpy.amax(data), - numpy.amin(data))
print "amplitude_max=", amplitude_max
rcv_ids = {'rcv1': (nx / 2, 'blue'),
'rcv2': (nx / 4, 'red'),
'rcv3': (3 * nx / 5, 'green'),
# 'rcv4': (1200, 'orange'),
# 'rcv5': (800, 'purple'),
}
plt.figure()
with seaborn.axes_style("dark"):
cmap = 'gray'
plt.imshow(data, cmap = cmap, interpolation = 'none', aspect = 'auto', vmin = - 0.1 * amplitude_max, vmax = 0.1 * amplitude_max)
for key, value in rcv_ids.iteritems():
rcv_id, color = value
plt.plot([rcv_id, rcv_id], [0.0, nz], color = color, linewidth = 2)
plt.xlim([0,nx])
plt.ylim([nz,0])
plt.figure()
cnt = 1
for key, value in rcv_ids.iteritems():
rcv_id, color = value
offset = numpy.power(-1.0, cnt) * (2.0 * amplitude_max) * (cnt / 2)
print offset
plt.plot(offset + data[:, rcv_id], color = color, linewidth = 2, label = key)
cnt += 1
plt.legend()
plt.show()
|
<commit_before><commit_msg>Add python script to read receiver file<commit_after>
|
#! /usr/bin/env python
import numpy
from matplotlib import pyplot as plt
import matplotlib.colors as colors
import os
import sys
import seaborn
def ParseVariableBinaryHeader(header):
header_detect = '#'
header = header.strip("\n").split()
assert(header[0] == header_detect)
name = header[1]
dtype = header[2]
nb_components = int(header[3])
nx = int(header[4])
ny = int(header[5])
nz = int(header[6])
return name, dtype, nb_components, nx, ny, nz
receiver_filename = "output/receivers.dat"
receiver_file = open(receiver_filename, 'r')
readlines = receiver_file.read().split("\n")
receiver_file.close()
temp_filename = "tmp.binary"
tempfile = open(temp_filename, 'wb')
# Parse header
header = readlines[0]
name, dtype, nb_components, nx, ny, nz = ParseVariableBinaryHeader(header)
# Write data without header
for line in readlines[1:]:
tempfile.write(line + "\n")
tempfile.close()
tempfile = open(temp_filename, 'rb')
data = numpy.fromfile(tempfile, dtype = 'float_')
tempfile.close()
if os.path.exists(temp_filename):
print "Removing temporary file " + str(temp_filename)
os.remove(temp_filename)
print data.shape, nx, nz
data = data.reshape(nz, nx)
amplitude_max = max(numpy.amax(data), - numpy.amin(data))
print "amplitude_max=", amplitude_max
rcv_ids = {'rcv1': (nx / 2, 'blue'),
'rcv2': (nx / 4, 'red'),
'rcv3': (3 * nx / 5, 'green'),
# 'rcv4': (1200, 'orange'),
# 'rcv5': (800, 'purple'),
}
plt.figure()
with seaborn.axes_style("dark"):
cmap = 'gray'
plt.imshow(data, cmap = cmap, interpolation = 'none', aspect = 'auto', vmin = - 0.1 * amplitude_max, vmax = 0.1 * amplitude_max)
for key, value in rcv_ids.iteritems():
rcv_id, color = value
plt.plot([rcv_id, rcv_id], [0.0, nz], color = color, linewidth = 2)
plt.xlim([0,nx])
plt.ylim([nz,0])
plt.figure()
cnt = 1
for key, value in rcv_ids.iteritems():
rcv_id, color = value
offset = numpy.power(-1.0, cnt) * (2.0 * amplitude_max) * (cnt / 2)
print offset
plt.plot(offset + data[:, rcv_id], color = color, linewidth = 2, label = key)
cnt += 1
plt.legend()
plt.show()
|
Add python script to read receiver file#! /usr/bin/env python
import numpy
from matplotlib import pyplot as plt
import matplotlib.colors as colors
import os
import sys
import seaborn
def ParseVariableBinaryHeader(header):
header_detect = '#'
header = header.strip("\n").split()
assert(header[0] == header_detect)
name = header[1]
dtype = header[2]
nb_components = int(header[3])
nx = int(header[4])
ny = int(header[5])
nz = int(header[6])
return name, dtype, nb_components, nx, ny, nz
receiver_filename = "output/receivers.dat"
receiver_file = open(receiver_filename, 'r')
readlines = receiver_file.read().split("\n")
receiver_file.close()
temp_filename = "tmp.binary"
tempfile = open(temp_filename, 'wb')
# Parse header
header = readlines[0]
name, dtype, nb_components, nx, ny, nz = ParseVariableBinaryHeader(header)
# Write data without header
for line in readlines[1:]:
tempfile.write(line + "\n")
tempfile.close()
tempfile = open(temp_filename, 'rb')
data = numpy.fromfile(tempfile, dtype = 'float_')
tempfile.close()
if os.path.exists(temp_filename):
print "Removing temporary file " + str(temp_filename)
os.remove(temp_filename)
print data.shape, nx, nz
data = data.reshape(nz, nx)
amplitude_max = max(numpy.amax(data), - numpy.amin(data))
print "amplitude_max=", amplitude_max
rcv_ids = {'rcv1': (nx / 2, 'blue'),
'rcv2': (nx / 4, 'red'),
'rcv3': (3 * nx / 5, 'green'),
# 'rcv4': (1200, 'orange'),
# 'rcv5': (800, 'purple'),
}
plt.figure()
with seaborn.axes_style("dark"):
cmap = 'gray'
plt.imshow(data, cmap = cmap, interpolation = 'none', aspect = 'auto', vmin = - 0.1 * amplitude_max, vmax = 0.1 * amplitude_max)
for key, value in rcv_ids.iteritems():
rcv_id, color = value
plt.plot([rcv_id, rcv_id], [0.0, nz], color = color, linewidth = 2)
plt.xlim([0,nx])
plt.ylim([nz,0])
plt.figure()
cnt = 1
for key, value in rcv_ids.iteritems():
rcv_id, color = value
offset = numpy.power(-1.0, cnt) * (2.0 * amplitude_max) * (cnt / 2)
print offset
plt.plot(offset + data[:, rcv_id], color = color, linewidth = 2, label = key)
cnt += 1
plt.legend()
plt.show()
|
<commit_before><commit_msg>Add python script to read receiver file<commit_after>#! /usr/bin/env python
import numpy
from matplotlib import pyplot as plt
import matplotlib.colors as colors
import os
import sys
import seaborn
def ParseVariableBinaryHeader(header):
header_detect = '#'
header = header.strip("\n").split()
assert(header[0] == header_detect)
name = header[1]
dtype = header[2]
nb_components = int(header[3])
nx = int(header[4])
ny = int(header[5])
nz = int(header[6])
return name, dtype, nb_components, nx, ny, nz
receiver_filename = "output/receivers.dat"
receiver_file = open(receiver_filename, 'r')
readlines = receiver_file.read().split("\n")
receiver_file.close()
temp_filename = "tmp.binary"
tempfile = open(temp_filename, 'wb')
# Parse header
header = readlines[0]
name, dtype, nb_components, nx, ny, nz = ParseVariableBinaryHeader(header)
# Write data without header
for line in readlines[1:]:
tempfile.write(line + "\n")
tempfile.close()
tempfile = open(temp_filename, 'rb')
data = numpy.fromfile(tempfile, dtype = 'float_')
tempfile.close()
if os.path.exists(temp_filename):
print "Removing temporary file " + str(temp_filename)
os.remove(temp_filename)
print data.shape, nx, nz
data = data.reshape(nz, nx)
amplitude_max = max(numpy.amax(data), - numpy.amin(data))
print "amplitude_max=", amplitude_max
rcv_ids = {'rcv1': (nx / 2, 'blue'),
'rcv2': (nx / 4, 'red'),
'rcv3': (3 * nx / 5, 'green'),
# 'rcv4': (1200, 'orange'),
# 'rcv5': (800, 'purple'),
}
plt.figure()
with seaborn.axes_style("dark"):
cmap = 'gray'
plt.imshow(data, cmap = cmap, interpolation = 'none', aspect = 'auto', vmin = - 0.1 * amplitude_max, vmax = 0.1 * amplitude_max)
for key, value in rcv_ids.iteritems():
rcv_id, color = value
plt.plot([rcv_id, rcv_id], [0.0, nz], color = color, linewidth = 2)
plt.xlim([0,nx])
plt.ylim([nz,0])
plt.figure()
cnt = 1
for key, value in rcv_ids.iteritems():
rcv_id, color = value
offset = numpy.power(-1.0, cnt) * (2.0 * amplitude_max) * (cnt / 2)
print offset
plt.plot(offset + data[:, rcv_id], color = color, linewidth = 2, label = key)
cnt += 1
plt.legend()
plt.show()
|
|
2dc8ae91713fdd73ac1e835dcb191714c2e93593
|
tests/test_runner.py
|
tests/test_runner.py
|
from twisted.trial import unittest
from ooni.inputunit import InputUnit
from ooni.nettest import NetTestCase
from ooni.reporter import OReporter
from ooni.runner import loadTestsAndOptions, runTestCasesWithInputUnit
class DummyTestCase(NetTestCase):
def test_a(self):
self.report['bar'] = 'bar'
def test_b(self):
self.report['foo'] = 'foo'
class DummyTestCasePP(DummyTestCase):
def postProcessor(self, report):
self.report['antani'] = 'sblinda'
class DummyReporter(OReporter):
dummy_report = []
def createReport(self, options):
pass
def writeReportEntry(self, entry):
self.dummy_report.append(entry)
class TestRunner(unittest.TestCase):
def test_load_test_and_options(self):
input_unit = InputUnit([0,1,2,3,4])
cmd_line_options = {}
test_cases, options = loadTestsAndOptions([DummyTestCase],
cmd_line_options)
self.assertEqual(test_cases[0][1], 'test_b')
self.assertEqual(test_cases[1][1], 'test_a')
def test_run_testcase_with_input_unit(self):
oreporter = DummyReporter()
oreporter.dummy_report = []
def done(result):
report = oreporter.dummy_report
self.assertEqual(len(report), 5*2)
for idx, entry in enumerate(oreporter.dummy_report):
if idx % 2 == 0:
self.assertEqual(entry['report']['foo'], 'foo')
else:
self.assertEqual(entry['report']['bar'], 'bar')
input_unit = InputUnit([0,1,2,3,4])
cmd_line_options = {'collector': None}
test_cases, options = loadTestsAndOptions([DummyTestCase],
cmd_line_options)
d = runTestCasesWithInputUnit(test_cases, input_unit, oreporter)
d.addBoth(done)
return d
def test_with_post_processing(self):
oreporter = DummyReporter()
oreporter.dummy_report = []
def done(result):
report = oreporter.dummy_report
self.assertEqual(len(report), 3)
for entry in report:
if entry['test_name'] == 'summary':
self.assertEqual(entry['report'], {'antani': 'sblinda'})
input_unit = InputUnit([None])
cmd_line_options = {'collector': None}
test_cases, options = loadTestsAndOptions([DummyTestCasePP],
cmd_line_options)
d = runTestCasesWithInputUnit(test_cases, input_unit, oreporter)
d.addBoth(done)
return d
|
Add unittests for some methods of runner
|
Add unittests for some methods of runner
|
Python
|
bsd-2-clause
|
lordappsec/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,Karthikeyan-kkk/ooni-probe,juga0/ooni-probe,0xPoly/ooni-probe,lordappsec/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,Karthikeyan-kkk/ooni-probe,kdmurray91/ooni-probe
|
Add unittests for some methods of runner
|
from twisted.trial import unittest
from ooni.inputunit import InputUnit
from ooni.nettest import NetTestCase
from ooni.reporter import OReporter
from ooni.runner import loadTestsAndOptions, runTestCasesWithInputUnit
class DummyTestCase(NetTestCase):
def test_a(self):
self.report['bar'] = 'bar'
def test_b(self):
self.report['foo'] = 'foo'
class DummyTestCasePP(DummyTestCase):
def postProcessor(self, report):
self.report['antani'] = 'sblinda'
class DummyReporter(OReporter):
dummy_report = []
def createReport(self, options):
pass
def writeReportEntry(self, entry):
self.dummy_report.append(entry)
class TestRunner(unittest.TestCase):
def test_load_test_and_options(self):
input_unit = InputUnit([0,1,2,3,4])
cmd_line_options = {}
test_cases, options = loadTestsAndOptions([DummyTestCase],
cmd_line_options)
self.assertEqual(test_cases[0][1], 'test_b')
self.assertEqual(test_cases[1][1], 'test_a')
def test_run_testcase_with_input_unit(self):
oreporter = DummyReporter()
oreporter.dummy_report = []
def done(result):
report = oreporter.dummy_report
self.assertEqual(len(report), 5*2)
for idx, entry in enumerate(oreporter.dummy_report):
if idx % 2 == 0:
self.assertEqual(entry['report']['foo'], 'foo')
else:
self.assertEqual(entry['report']['bar'], 'bar')
input_unit = InputUnit([0,1,2,3,4])
cmd_line_options = {'collector': None}
test_cases, options = loadTestsAndOptions([DummyTestCase],
cmd_line_options)
d = runTestCasesWithInputUnit(test_cases, input_unit, oreporter)
d.addBoth(done)
return d
def test_with_post_processing(self):
oreporter = DummyReporter()
oreporter.dummy_report = []
def done(result):
report = oreporter.dummy_report
self.assertEqual(len(report), 3)
for entry in report:
if entry['test_name'] == 'summary':
self.assertEqual(entry['report'], {'antani': 'sblinda'})
input_unit = InputUnit([None])
cmd_line_options = {'collector': None}
test_cases, options = loadTestsAndOptions([DummyTestCasePP],
cmd_line_options)
d = runTestCasesWithInputUnit(test_cases, input_unit, oreporter)
d.addBoth(done)
return d
|
<commit_before><commit_msg>Add unittests for some methods of runner<commit_after>
|
from twisted.trial import unittest
from ooni.inputunit import InputUnit
from ooni.nettest import NetTestCase
from ooni.reporter import OReporter
from ooni.runner import loadTestsAndOptions, runTestCasesWithInputUnit
class DummyTestCase(NetTestCase):
def test_a(self):
self.report['bar'] = 'bar'
def test_b(self):
self.report['foo'] = 'foo'
class DummyTestCasePP(DummyTestCase):
def postProcessor(self, report):
self.report['antani'] = 'sblinda'
class DummyReporter(OReporter):
dummy_report = []
def createReport(self, options):
pass
def writeReportEntry(self, entry):
self.dummy_report.append(entry)
class TestRunner(unittest.TestCase):
def test_load_test_and_options(self):
input_unit = InputUnit([0,1,2,3,4])
cmd_line_options = {}
test_cases, options = loadTestsAndOptions([DummyTestCase],
cmd_line_options)
self.assertEqual(test_cases[0][1], 'test_b')
self.assertEqual(test_cases[1][1], 'test_a')
def test_run_testcase_with_input_unit(self):
oreporter = DummyReporter()
oreporter.dummy_report = []
def done(result):
report = oreporter.dummy_report
self.assertEqual(len(report), 5*2)
for idx, entry in enumerate(oreporter.dummy_report):
if idx % 2 == 0:
self.assertEqual(entry['report']['foo'], 'foo')
else:
self.assertEqual(entry['report']['bar'], 'bar')
input_unit = InputUnit([0,1,2,3,4])
cmd_line_options = {'collector': None}
test_cases, options = loadTestsAndOptions([DummyTestCase],
cmd_line_options)
d = runTestCasesWithInputUnit(test_cases, input_unit, oreporter)
d.addBoth(done)
return d
def test_with_post_processing(self):
oreporter = DummyReporter()
oreporter.dummy_report = []
def done(result):
report = oreporter.dummy_report
self.assertEqual(len(report), 3)
for entry in report:
if entry['test_name'] == 'summary':
self.assertEqual(entry['report'], {'antani': 'sblinda'})
input_unit = InputUnit([None])
cmd_line_options = {'collector': None}
test_cases, options = loadTestsAndOptions([DummyTestCasePP],
cmd_line_options)
d = runTestCasesWithInputUnit(test_cases, input_unit, oreporter)
d.addBoth(done)
return d
|
Add unittests for some methods of runnerfrom twisted.trial import unittest
from ooni.inputunit import InputUnit
from ooni.nettest import NetTestCase
from ooni.reporter import OReporter
from ooni.runner import loadTestsAndOptions, runTestCasesWithInputUnit
class DummyTestCase(NetTestCase):
def test_a(self):
self.report['bar'] = 'bar'
def test_b(self):
self.report['foo'] = 'foo'
class DummyTestCasePP(DummyTestCase):
def postProcessor(self, report):
self.report['antani'] = 'sblinda'
class DummyReporter(OReporter):
dummy_report = []
def createReport(self, options):
pass
def writeReportEntry(self, entry):
self.dummy_report.append(entry)
class TestRunner(unittest.TestCase):
def test_load_test_and_options(self):
input_unit = InputUnit([0,1,2,3,4])
cmd_line_options = {}
test_cases, options = loadTestsAndOptions([DummyTestCase],
cmd_line_options)
self.assertEqual(test_cases[0][1], 'test_b')
self.assertEqual(test_cases[1][1], 'test_a')
def test_run_testcase_with_input_unit(self):
oreporter = DummyReporter()
oreporter.dummy_report = []
def done(result):
report = oreporter.dummy_report
self.assertEqual(len(report), 5*2)
for idx, entry in enumerate(oreporter.dummy_report):
if idx % 2 == 0:
self.assertEqual(entry['report']['foo'], 'foo')
else:
self.assertEqual(entry['report']['bar'], 'bar')
input_unit = InputUnit([0,1,2,3,4])
cmd_line_options = {'collector': None}
test_cases, options = loadTestsAndOptions([DummyTestCase],
cmd_line_options)
d = runTestCasesWithInputUnit(test_cases, input_unit, oreporter)
d.addBoth(done)
return d
def test_with_post_processing(self):
oreporter = DummyReporter()
oreporter.dummy_report = []
def done(result):
report = oreporter.dummy_report
self.assertEqual(len(report), 3)
for entry in report:
if entry['test_name'] == 'summary':
self.assertEqual(entry['report'], {'antani': 'sblinda'})
input_unit = InputUnit([None])
cmd_line_options = {'collector': None}
test_cases, options = loadTestsAndOptions([DummyTestCasePP],
cmd_line_options)
d = runTestCasesWithInputUnit(test_cases, input_unit, oreporter)
d.addBoth(done)
return d
|
<commit_before><commit_msg>Add unittests for some methods of runner<commit_after>from twisted.trial import unittest
from ooni.inputunit import InputUnit
from ooni.nettest import NetTestCase
from ooni.reporter import OReporter
from ooni.runner import loadTestsAndOptions, runTestCasesWithInputUnit
class DummyTestCase(NetTestCase):
def test_a(self):
self.report['bar'] = 'bar'
def test_b(self):
self.report['foo'] = 'foo'
class DummyTestCasePP(DummyTestCase):
def postProcessor(self, report):
self.report['antani'] = 'sblinda'
class DummyReporter(OReporter):
dummy_report = []
def createReport(self, options):
pass
def writeReportEntry(self, entry):
self.dummy_report.append(entry)
class TestRunner(unittest.TestCase):
def test_load_test_and_options(self):
input_unit = InputUnit([0,1,2,3,4])
cmd_line_options = {}
test_cases, options = loadTestsAndOptions([DummyTestCase],
cmd_line_options)
self.assertEqual(test_cases[0][1], 'test_b')
self.assertEqual(test_cases[1][1], 'test_a')
def test_run_testcase_with_input_unit(self):
oreporter = DummyReporter()
oreporter.dummy_report = []
def done(result):
report = oreporter.dummy_report
self.assertEqual(len(report), 5*2)
for idx, entry in enumerate(oreporter.dummy_report):
if idx % 2 == 0:
self.assertEqual(entry['report']['foo'], 'foo')
else:
self.assertEqual(entry['report']['bar'], 'bar')
input_unit = InputUnit([0,1,2,3,4])
cmd_line_options = {'collector': None}
test_cases, options = loadTestsAndOptions([DummyTestCase],
cmd_line_options)
d = runTestCasesWithInputUnit(test_cases, input_unit, oreporter)
d.addBoth(done)
return d
def test_with_post_processing(self):
oreporter = DummyReporter()
oreporter.dummy_report = []
def done(result):
report = oreporter.dummy_report
self.assertEqual(len(report), 3)
for entry in report:
if entry['test_name'] == 'summary':
self.assertEqual(entry['report'], {'antani': 'sblinda'})
input_unit = InputUnit([None])
cmd_line_options = {'collector': None}
test_cases, options = loadTestsAndOptions([DummyTestCasePP],
cmd_line_options)
d = runTestCasesWithInputUnit(test_cases, input_unit, oreporter)
d.addBoth(done)
return d
|