| commit (stringlengths 40-40) | old_file (stringlengths 4-118) | new_file (stringlengths 4-118) | old_contents (stringlengths 0-2.94k) | new_contents (stringlengths 1-4.43k) | subject (stringlengths 15-444) | message (stringlengths 16-3.45k) | lang (stringclasses, 1 value) | license (stringclasses, 13 values) | repos (stringlengths 5-43.2k) | prompt (stringlengths 17-4.58k) | response (stringlengths 1-4.43k) | prompt_tagged (stringlengths 58-4.62k) | response_tagged (stringlengths 1-4.43k) | text (stringlengths 132-7.29k) | text_tagged (stringlengths 173-7.33k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
18085ebbf45473046980d182fe285d6663698e72
|
src/query_processing/query_processing.py
|
src/query_processing/query_processing.py
|
# LING 573 Question Answering System
# Code last updated 4/15/14 by Andrea Kahn
# This code implements a QueryProcessor for the question answering system.

# TODO: A QueryProcessor should be initialized with the Question object, but should it
# have this question as an attribute, or should it have attributes id, type, q, target
# that match those of the question?
# Advantage of first approach: allows us to change the Question and have the QueryProcessor
# update automatically (not sure we'd ever do this).
# Advantage of second approach: saves typing (access the id with self.id, versus self.question.id).

from sys import stderr  # needed by the warning in generate_ans_template()


# A QueryProcessor object has the attribute "question", a Question object.
class QueryProcessor(object):
    def __init__(self, question):
        self.question = question

    # This method returns a set of SearchQuery objects.
    def generate_queries(self):
        pass

    # This method returns an AnswerTemplate object.
    def generate_ans_template(self):
        # NB: The following if statement should always evaluate as True in our system, but
        # its inclusion enables the system to more easily be extended to handle other types
        # of questions, for which the text-processing and AnswerTemplate-generation steps
        # might be slightly different.
        if self.question.type == "factoid":
            # do some sort of text-processing on the natural-language question and context
            # to determine NE type
            # generate a corresponding AnswerTemplate object
            # return it
            pass
        else:
            stderr.write("Warning: System can only handle \"factoid\" questions\n")
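The TODO above weighs keeping the Question object against copying its fields. A minimal sketch of a middle path that gets both benefits, read-only properties delegating to the wrapped Question; this is an illustration, not part of the commit:

# Hypothetical sketch: keep self.question as the single source of truth,
# while self.id / self.type stay in sync with any change to the Question.
class QueryProcessor(object):
    def __init__(self, question):
        self.question = question

    @property
    def id(self):
        return self.question.id

    @property
    def type(self):
        return self.question.type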
|
Add skeleton for query-processing module
|
Add skeleton for query-processing module
|
Python
|
mit
|
amkahn/question-answering,amkahn/question-answering
|
87a5419d9717d641bf4c30740d2d431f4fd7a478
|
setup.py
|
setup.py
|
"""Akhet installation script.
"""
import os
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, "README.txt")).read()
CHANGES = open(os.path.join(here, "CHANGES.txt")).read()
requires = [
"pyramid",
"SQLAlchemy",
"transaction",
"repoze.tm2",
"zope.sqlalchemy"]
entry_points = """
[paste.paster_create_template]
pyramid_sqla=pyramid_sqla.paster_templates:PyramidSQLAProjectTemplate
"""
setup(name="pyramid_sqla",
version="1.0rc1",
description="A SQLAlchemy library and Pylons-like application template for Pyramid",
long_description=README + "\n\n" + CHANGES,
classifiers=[
"Intended Audience :: Developers",
"Framework :: Pylons",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
],
keywords="web wsgi pylons pyramid",
author="Mike Orr",
author_email="sluggoster@gmail.com",
url="http://docs.pylonshq.com",
license="MIT",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
tests_require = requires,
install_requires = requires,
test_suite="pyramid_sqla",
entry_points=entry_points,
)
|
"""Akhet installation script.
"""
import os
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, "README.txt")).read()
CHANGES = open(os.path.join(here, "CHANGES.txt")).read()
requires = [
"pyramid",
]
entry_points = """
[paste.paster_create_template]
pyramid_sqla=pyramid_sqla.paster_templates:PyramidSQLAProjectTemplate
"""
setup(name="pyramid_sqla",
version="1.0rc1",
description="A SQLAlchemy library and Pylons-like application template for Pyramid",
long_description=README + "\n\n" + CHANGES,
classifiers=[
"Intended Audience :: Developers",
"Framework :: Pylons",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
],
keywords="web wsgi pylons pyramid",
author="Mike Orr",
author_email="sluggoster@gmail.com",
url="http://docs.pylonshq.com",
license="MIT",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
tests_require = requires,
install_requires = requires,
test_suite="pyramid_sqla",
entry_points=entry_points,
)
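The commit removes the SQLAlchemy-related packages from install_requires outright; a common alternative (an assumption here, not what the commit does) is to keep them available as an optional extra, passed to setup() via the extras_require keyword:

# Hypothetical variant: `pip install pyramid_sqla[sqlalchemy]` would pull in
# the packages this commit drops, while the default install stays minimal.
extras_require = {
    "sqlalchemy": [
        "SQLAlchemy",
        "transaction",
        "repoze.tm2",
        "zope.sqlalchemy",
    ],
}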
|
Delete all formal dependencies except 'pyramid'.
|
Delete all formal dependencies except 'pyramid'.
|
Python
|
mit
|
Pylons/akhet,hlwsmith/akhet,Pylons/akhet,hlwsmith/akhet,hlwsmith/akhet
|
"""Akhet installation script.
"""
import os
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, "README.txt")).read()
CHANGES = open(os.path.join(here, "CHANGES.txt")).read()
requires = [
"pyramid",
"SQLAlchemy",
"transaction",
"repoze.tm2",
"zope.sqlalchemy"]
entry_points = """
[paste.paster_create_template]
pyramid_sqla=pyramid_sqla.paster_templates:PyramidSQLAProjectTemplate
"""
setup(name="pyramid_sqla",
version="1.0rc1",
description="A SQLAlchemy library and Pylons-like application template for Pyramid",
long_description=README + "\n\n" + CHANGES,
classifiers=[
"Intended Audience :: Developers",
"Framework :: Pylons",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
],
keywords="web wsgi pylons pyramid",
author="Mike Orr",
author_email="sluggoster@gmail.com",
url="http://docs.pylonshq.com",
license="MIT",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
tests_require = requires,
install_requires = requires,
test_suite="pyramid_sqla",
entry_points=entry_points,
)
Delete all formal dependencies except 'pyramid'.
|
"""Akhet installation script.
"""
import os
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, "README.txt")).read()
CHANGES = open(os.path.join(here, "CHANGES.txt")).read()
requires = [
"pyramid",
]
entry_points = """
[paste.paster_create_template]
pyramid_sqla=pyramid_sqla.paster_templates:PyramidSQLAProjectTemplate
"""
setup(name="pyramid_sqla",
version="1.0rc1",
description="A SQLAlchemy library and Pylons-like application template for Pyramid",
long_description=README + "\n\n" + CHANGES,
classifiers=[
"Intended Audience :: Developers",
"Framework :: Pylons",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
],
keywords="web wsgi pylons pyramid",
author="Mike Orr",
author_email="sluggoster@gmail.com",
url="http://docs.pylonshq.com",
license="MIT",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
tests_require = requires,
install_requires = requires,
test_suite="pyramid_sqla",
entry_points=entry_points,
)
|
<commit_before>"""Akhet installation script.
"""
import os
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, "README.txt")).read()
CHANGES = open(os.path.join(here, "CHANGES.txt")).read()
requires = [
"pyramid",
"SQLAlchemy",
"transaction",
"repoze.tm2",
"zope.sqlalchemy"]
entry_points = """
[paste.paster_create_template]
pyramid_sqla=pyramid_sqla.paster_templates:PyramidSQLAProjectTemplate
"""
setup(name="pyramid_sqla",
version="1.0rc1",
description="A SQLAlchemy library and Pylons-like application template for Pyramid",
long_description=README + "\n\n" + CHANGES,
classifiers=[
"Intended Audience :: Developers",
"Framework :: Pylons",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
],
keywords="web wsgi pylons pyramid",
author="Mike Orr",
author_email="sluggoster@gmail.com",
url="http://docs.pylonshq.com",
license="MIT",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
tests_require = requires,
install_requires = requires,
test_suite="pyramid_sqla",
entry_points=entry_points,
)
<commit_msg>Delete all formal dependencies except 'pyramid'.<commit_after>
|
"""Akhet installation script.
"""
import os
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, "README.txt")).read()
CHANGES = open(os.path.join(here, "CHANGES.txt")).read()
requires = [
"pyramid",
]
entry_points = """
[paste.paster_create_template]
pyramid_sqla=pyramid_sqla.paster_templates:PyramidSQLAProjectTemplate
"""
setup(name="pyramid_sqla",
version="1.0rc1",
description="A SQLAlchemy library and Pylons-like application template for Pyramid",
long_description=README + "\n\n" + CHANGES,
classifiers=[
"Intended Audience :: Developers",
"Framework :: Pylons",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
],
keywords="web wsgi pylons pyramid",
author="Mike Orr",
author_email="sluggoster@gmail.com",
url="http://docs.pylonshq.com",
license="MIT",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
tests_require = requires,
install_requires = requires,
test_suite="pyramid_sqla",
entry_points=entry_points,
)
|
"""Akhet installation script.
"""
import os
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, "README.txt")).read()
CHANGES = open(os.path.join(here, "CHANGES.txt")).read()
requires = [
"pyramid",
"SQLAlchemy",
"transaction",
"repoze.tm2",
"zope.sqlalchemy"]
entry_points = """
[paste.paster_create_template]
pyramid_sqla=pyramid_sqla.paster_templates:PyramidSQLAProjectTemplate
"""
setup(name="pyramid_sqla",
version="1.0rc1",
description="A SQLAlchemy library and Pylons-like application template for Pyramid",
long_description=README + "\n\n" + CHANGES,
classifiers=[
"Intended Audience :: Developers",
"Framework :: Pylons",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
],
keywords="web wsgi pylons pyramid",
author="Mike Orr",
author_email="sluggoster@gmail.com",
url="http://docs.pylonshq.com",
license="MIT",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
tests_require = requires,
install_requires = requires,
test_suite="pyramid_sqla",
entry_points=entry_points,
)
Delete all formal dependencies except 'pyramid'."""Akhet installation script.
"""
import os
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, "README.txt")).read()
CHANGES = open(os.path.join(here, "CHANGES.txt")).read()
requires = [
"pyramid",
]
entry_points = """
[paste.paster_create_template]
pyramid_sqla=pyramid_sqla.paster_templates:PyramidSQLAProjectTemplate
"""
setup(name="pyramid_sqla",
version="1.0rc1",
description="A SQLAlchemy library and Pylons-like application template for Pyramid",
long_description=README + "\n\n" + CHANGES,
classifiers=[
"Intended Audience :: Developers",
"Framework :: Pylons",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
],
keywords="web wsgi pylons pyramid",
author="Mike Orr",
author_email="sluggoster@gmail.com",
url="http://docs.pylonshq.com",
license="MIT",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
tests_require = requires,
install_requires = requires,
test_suite="pyramid_sqla",
entry_points=entry_points,
)
|
<commit_before>"""Akhet installation script.
"""
import os
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, "README.txt")).read()
CHANGES = open(os.path.join(here, "CHANGES.txt")).read()
requires = [
"pyramid",
"SQLAlchemy",
"transaction",
"repoze.tm2",
"zope.sqlalchemy"]
entry_points = """
[paste.paster_create_template]
pyramid_sqla=pyramid_sqla.paster_templates:PyramidSQLAProjectTemplate
"""
setup(name="pyramid_sqla",
version="1.0rc1",
description="A SQLAlchemy library and Pylons-like application template for Pyramid",
long_description=README + "\n\n" + CHANGES,
classifiers=[
"Intended Audience :: Developers",
"Framework :: Pylons",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
],
keywords="web wsgi pylons pyramid",
author="Mike Orr",
author_email="sluggoster@gmail.com",
url="http://docs.pylonshq.com",
license="MIT",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
tests_require = requires,
install_requires = requires,
test_suite="pyramid_sqla",
entry_points=entry_points,
)
<commit_msg>Delete all formal dependencies except 'pyramid'.<commit_after>"""Akhet installation script.
"""
import os
from setuptools import setup
from setuptools import find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, "README.txt")).read()
CHANGES = open(os.path.join(here, "CHANGES.txt")).read()
requires = [
"pyramid",
]
entry_points = """
[paste.paster_create_template]
pyramid_sqla=pyramid_sqla.paster_templates:PyramidSQLAProjectTemplate
"""
setup(name="pyramid_sqla",
version="1.0rc1",
description="A SQLAlchemy library and Pylons-like application template for Pyramid",
long_description=README + "\n\n" + CHANGES,
classifiers=[
"Intended Audience :: Developers",
"Framework :: Pylons",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
],
keywords="web wsgi pylons pyramid",
author="Mike Orr",
author_email="sluggoster@gmail.com",
url="http://docs.pylonshq.com",
license="MIT",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
tests_require = requires,
install_requires = requires,
test_suite="pyramid_sqla",
entry_points=entry_points,
)
|
7c66155295b439258c829b3d5a7fb38a40612d3e
|
alembic/versions/35f8b948e98d_delete_hidden_column.py
|
alembic/versions/35f8b948e98d_delete_hidden_column.py
|
"""Delete hidden column
Revision ID: 35f8b948e98d
Revises: 36fba9f9069d
Create Date: 2015-08-07 10:15:34.608398
"""
# revision identifiers, used by Alembic.
revision = '35f8b948e98d'
down_revision = '36fba9f9069d'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.drop_column('project', 'hidden')
def downgrade():
op.add_column('project', sa.Column('hidden', sa.Integer, default=0))
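For reference, the standard Alembic invocations that exercise this revision (not shown in the commit):

alembic upgrade head    # applies upgrade(): drops project.hidden
alembic downgrade -1    # applies downgrade(): re-adds the column with default 0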
|
Add migration for removing hidden column in project
|
Add migration for removing hidden column in project
|
Python
|
agpl-3.0
|
PyBossa/pybossa,PyBossa/pybossa,geotagx/pybossa,geotagx/pybossa,Scifabric/pybossa,Scifabric/pybossa
|
97e64eae02ef5f850f8b4bd2ad1028f59c8328c7
|
Spring2018/src/make_inf_set.py
|
Spring2018/src/make_inf_set.py
|
"""
Copies files from the original dataset for use with inference.
NOTE: For inference, images need to be in the form:
image{}.jpg where {} should be replaced with 1, 2, etc.
Assumptions:
This code is run from the ci-models directory
The images/labels are placed in data/<class-name>
"""
import os, random
import argparse
import shutil
def make_clean_directory(dir_name):
"""
Remove directory (if exists) and make another (clean) directory
Parameters:
dir_name - directory name to "clean"
Returns:
None
"""
if os.path.exists(dir_name):
shutil.rmtree(dir_name, ignore_errors=True)
os.makedirs(dir_name)
def copy_rnd_images(data_dir, test_img_dir, img_count):
idx = 0
while idx < img_count:
img_name = random.choice(os.listdir(data_dir))
if img_name.split('.')[1] == 'jpg':
shutil.copy(os.path.join(data_dir, img_name),
os.path.join(test_img_dir, 'image{%02d}.jpg' % (idx)))
idx += 1
def parse_args():
"""
Parse command line arguments.
Parameters:
None
Returns:
parser arguments
"""
parser = argparse.ArgumentParser(description='Make Inference Set')
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
optional.add_argument('--img_count',
dest = '--img_count',
help = 'How many images to copy from data',
default=10)
required.add_argument('--class_name',
dest = '--class_name',
help = 'Class name for data (i.e. boat2, etc.)')
parser._action_groups.append(optional)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args() # Command line parameters
test_img_dir = 'data/test_imgs'
make_clean_directory(test_img_dir) # Create directory for test images
copy_rnd_images(os.path.join('data', args.class_name), test_img_dir,
args.img_count)
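Given the assumptions in the module docstring, an invocation would look like this (the class name is illustrative, taken from the script's own help text):

python Spring2018/src/make_inf_set.py --class_name boat2 --img_count 5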
|
Add script to make inference set
|
Add script to make inference set
|
Python
|
mit
|
CUFCTL/DLBD,CUFCTL/DLBD
|
91e33076e343c04aea8e905a96c07edb2da04700
|
pdf_generator/page_number.py
|
pdf_generator/page_number.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from reportlab.pdfgen.canvas import Canvas


class NumberedCanvasFactory(object):
    def __init__(self, x, y, text):
        self._x = x
        self._y = y
        self._text = text

    def __call__(self, *args, **kw):
        return NumberedCanvas(*args,
                              x=self._x,
                              y=self._y,
                              text=self._text,
                              **kw)


class NumberedCanvas(Canvas):
    def __init__(self, *args, **kwargs):
        self._nc_x = kwargs.pop('x')
        self._nc_y = kwargs.pop('y')
        self._nc_text = kwargs.pop('text')
        Canvas.__init__(self, *args, **kwargs)
        self._codes = []

    def showPage(self):
        self._codes.append(dict(self.__dict__))
        self._startPage()

    def save(self):
        x, y = self._nc_x, self._nc_y
        if x < 0:
            x = self._pagesize[0] + x
        if y > 0:
            y = self._pagesize[1] - y
        else:
            y = -y
        for code in self._codes:
            # recall saved page
            self.__dict__.update(code)
            self.setFont('Helvetica', 7)
            self.drawRightString(
                x, y,
                self._nc_text.format(self._pageNumber, len(self._codes)),
            )
            # emit the restored page before moving to the next saved state
            Canvas.showPage(self)
        Canvas.save(self)
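A minimal sketch of how the factory plugs into ReportLab's platypus layer; the document content is a placeholder, and the negative coordinates follow the sign convention in save() above (measured from the right and bottom edges):

# Hypothetical usage: platypus builds each page through the factory, and the
# text template receives (current page, total pages) via str.format().
from reportlab.lib.pagesizes import A4
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import SimpleDocTemplate, Paragraph

doc = SimpleDocTemplate('out.pdf', pagesize=A4)
story = [Paragraph('Hello', getSampleStyleSheet()['Normal'])]
doc.build(story, canvasmaker=NumberedCanvasFactory(-30, -20, 'Page {0} of {1}'))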
|
Store all the state of the canvas
|
Store all the state of the canvas
|
Python
|
mit
|
cecedille1/PDF_generator
|
026e2b6ffc70fc1732ed6f69aa76d0c1263b0b09
|
app/format_json.py
|
app/format_json.py
|
import csv
import json
import collections

data = collections.defaultdict(dict)


def main():
    with open('en-us.csv', 'rb') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            keys = row[0].split('.')
            merge(data, format_dict(keys, row[1]))
    f = open('en-us.json', 'w')
    f.write(json.dumps(data, indent=4))


def format_dict(keys, value):
    if len(keys) == 1:
        return {keys[0]: value}
    return {keys[0]: format_dict(keys[1:], value)}


# h/t: http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
def merge(d, u):
    for key, value in u.iteritems():
        if isinstance(value, collections.Mapping):
            r = merge(d.get(key, {}), value)
            d[key] = r
        else:
            d[key] = u[key]
    return d


if __name__ == '__main__':
    main()
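To make the key-splitting concrete, a short sketch with invented CSV rows (Python 2, matching the script's use of iteritems):

# Illustration only (rows are hypothetical, not from the commit):
out = {}
merge(out, format_dict('home.title'.split('.'), 'Welcome'))
merge(out, format_dict('home.nav.about'.split('.'), 'About Us'))
# out == {'home': {'title': 'Welcome', 'nav': {'about': 'About Us'}}}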
|
Add script to generate JSON with translation data
|
Add script to generate JSON with translation data
Take the csv with the translation variables and
values and convert it to a json file.
|
Python
|
apache-2.0
|
samanehsan/isp,CenterForOpenScience/isp,samanehsan/isp,CenterForOpenScience/isp,samanehsan/isp,CenterForOpenScience/isp
|
fb19d7753073fb51da5a6fa1d458a3c8bca1d6b4
|
coltrane/context_processors.py
|
coltrane/context_processors.py
|
"""
A context processor which adds the value of the
``COMMENTS_MODERATE_AFTER`` setting to each ``RequestContext`` in
which it is applied.
"""
from template_utils.context_processors import settings_processor
comment_moderation = settings_processor('COMMENTS_MODERATE_AFTER')
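A sketch of how the processor would be switched on in a Django settings module of that era; the first entry is just a typical neighbor, not something this commit requires:

# Hypothetical settings.py excerpt: templates rendered with a RequestContext
# then see COMMENTS_MODERATE_AFTER as a template variable.
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.auth',
    'coltrane.context_processors.comment_moderation',
)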
|
Add a context processor for the COMMENTS_MODERATE_AFTER setting
|
Add a context processor for the COMMENTS_MODERATE_AFTER setting
git-svn-id: 9770886a22906f523ce26b0ad22db0fc46e41232@69 5f8205a5-902a-0410-8b63-8f478ce83d95
|
Python
|
bsd-3-clause
|
clones/django-coltrane,mafix/coltrane-blog
|
512c730f78fae715a3ed9dfe59ef917a5566dcb5
|
src/dashboard/src/main/migrations/0048_fix_upload_qubit_setting.py
|
src/dashboard/src/main/migrations/0048_fix_upload_qubit_setting.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations


def data_migration(apps, schema_editor):
    """Migration 0032 inadvertently set previously non-existent "Upload DIP to
    AtoM" values to 'None'. This fixes that.
    """
    apps.get_model('main', 'DashboardSetting').objects.filter(
        scope='upload-qubit_v0.0', value='None').update(value='')


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0047_version_number'),
    ]

    operations = [
        migrations.RunPython(data_migration)
    ]
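As written the RunPython operation has no reverse, so the migration can be applied but not unapplied; a common variant (an assumption, not part of this commit) adds a no-op reverse:

# Hypothetical variant: lets `manage.py migrate main 0047` roll back cleanly.
operations = [
    migrations.RunPython(data_migration,
                         reverse_code=migrations.RunPython.noop)
]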
|
Add migration to fix "None" AtoM config issue
|
Add migration to fix "None" AtoM config issue
Fixes an issue introduced by migration 0032 wherein an AtoM setting that did
not exist in the previous state of the database was inadvertently created with
a value of 'None'. The migration introduced here changes those 'None's
to ''s.
Fixes #879.
|
Python
|
agpl-3.0
|
artefactual/archivematica,artefactual/archivematica,artefactual/archivematica,artefactual/archivematica
|
1c3141488f13819d76ca179984aed0cadf932d5c
|
tests/test_grids.py
|
tests/test_grids.py
|
#!/usr/bin/env python3

from __future__ import print_function, division

import numpy as np

from sht.grids import standard_grid, get_cartesian_grid


def test_grids():
    L = 10
    thetas, phis = standard_grid(L)
    # Can't really test much here
    assert thetas.size == L
    assert phis.size == L**2
    grid = get_cartesian_grid(thetas, phis)
    assert grid.shape == (L**2, 3)
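The test only pins array sizes, as its own comment admits; it would be collected and run in the usual way:

pytest tests/test_grids.py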
|
Add a test for grids.
|
Add a test for grids.
|
Python
|
mit
|
praveenv253/sht,praveenv253/sht
|
a173a4f2922fe0f2b9e9f5162ceb947681c837ae
|
tests/test_oauth.py
|
tests/test_oauth.py
|
from nose.tools import *

from website.oauth.models import ExternalProvider
from website.oauth.models import OAUTH1
from website.oauth.models import OAUTH2

from tests.base import OsfTestCase


class MockOauth2Provider(ExternalProvider):
    name = "Mock OAuth 2.0 Provider"
    short_name = "mock2"

    client_id = 'mock2_client_id'
    client_secret = 'mock2_client_secret'

    auth_url_base = 'http://mock2.com/auth'
    callback_url = 'http://mock2.com/callback'

    def handle_callback(self, data):
        pass


class TestExternalProvider(OsfTestCase):

    def test_instantiate(self):
        mock = MockOauth2Provider()

    def test_oauth_version_default(self):
        mock = MockOauth2Provider()
        assert_is(mock._oauth_version, OAUTH2)
|
Add very basic tests for OAuth
|
Add very basic tests for OAuth
|
Python
|
apache-2.0
|
billyhunt/osf.io,SSJohns/osf.io,Nesiehr/osf.io,cosenal/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,laurenrevere/osf.io,ticklemepierce/osf.io,erinspace/osf.io,brianjgeiger/osf.io,fabianvf/osf.io,Nesiehr/osf.io,samchrisinger/osf.io,abought/osf.io,ticklemepierce/osf.io,TomBaxter/osf.io,danielneis/osf.io,hmoco/osf.io,ZobairAlijan/osf.io,felliott/osf.io,samchrisinger/osf.io,kch8qx/osf.io,haoyuchen1992/osf.io,jolene-esposito/osf.io,billyhunt/osf.io,sloria/osf.io,haoyuchen1992/osf.io,alexschiller/osf.io,HarryRybacki/osf.io,kushG/osf.io,caseyrollins/osf.io,lamdnhan/osf.io,dplorimer/osf,CenterForOpenScience/osf.io,reinaH/osf.io,bdyetton/prettychart,kch8qx/osf.io,crcresearch/osf.io,billyhunt/osf.io,leb2dg/osf.io,GaryKriebel/osf.io,fabianvf/osf.io,mfraezz/osf.io,ckc6cz/osf.io,asanfilippo7/osf.io,mluo613/osf.io,revanthkolli/osf.io,cosenal/osf.io,chrisseto/osf.io,caneruguz/osf.io,zkraime/osf.io,arpitar/osf.io,bdyetton/prettychart,mluo613/osf.io,samchrisinger/osf.io,caseyrygt/osf.io,wearpants/osf.io,brandonPurvis/osf.io,arpitar/osf.io,alexschiller/osf.io,baylee-d/osf.io,wearpants/osf.io,danielneis/osf.io,binoculars/osf.io,rdhyee/osf.io,barbour-em/osf.io,revanthkolli/osf.io,TomHeatwole/osf.io,SSJohns/osf.io,chennan47/osf.io,cldershem/osf.io,cldershem/osf.io,caneruguz/osf.io,SSJohns/osf.io,lyndsysimon/osf.io,caseyrygt/osf.io,binoculars/osf.io,doublebits/osf.io,sbt9uc/osf.io,kch8qx/osf.io,acshi/osf.io,MerlinZhang/osf.io,mattclark/osf.io,TomHeatwole/osf.io,pattisdr/osf.io,petermalcolm/osf.io,billyhunt/osf.io,dplorimer/osf,lamdnhan/osf.io,mluo613/osf.io,jolene-esposito/osf.io,jmcarp/osf.io,icereval/osf.io,KAsante95/osf.io,KAsante95/osf.io,himanshuo/osf.io,MerlinZhang/osf.io,adlius/osf.io,caseyrygt/osf.io,TomBaxter/osf.io,danielneis/osf.io,ticklemepierce/osf.io,RomanZWang/osf.io,Ghalko/osf.io,kushG/osf.io,MerlinZhang/osf.io,pattisdr/osf.io,sbt9uc/osf.io,njantrania/osf.io,sloria/osf.io,monikagrabowska/osf.io,GageGaskins/osf.io,cslzchen/osf.io,jnayak1/osf.io,aaxelb/osf.io,bdyetton/prettychart,CenterForOpenScience/osf.io,pattisdr/osf.io,KAsante95/osf.io,cosenal/osf.io,mfraezz/osf.io,dplorimer/osf,brandonPurvis/osf.io,jinluyuan/osf.io,samanehsan/osf.io,jmcarp/osf.io,Nesiehr/osf.io,revanthkolli/osf.io,sbt9uc/osf.io,barbour-em/osf.io,kushG/osf.io,GageGaskins/osf.io,jeffreyliu3230/osf.io,DanielSBrown/osf.io,RomanZWang/osf.io,adlius/osf.io,HalcyonChimera/osf.io,ZobairAlijan/osf.io,reinaH/osf.io,adlius/osf.io,amyshi188/osf.io,zamattiac/osf.io,wearpants/osf.io,barbour-em/osf.io,TomBaxter/osf.io,jnayak1/osf.io,reinaH/osf.io,ticklemepierce/osf.io,laurenrevere/osf.io,MerlinZhang/osf.io,chrisseto/osf.io,cwisecarver/osf.io,jnayak1/osf.io,Johnetordoff/osf.io,chrisseto/osf.io,chennan47/osf.io,mluke93/osf.io,DanielSBrown/osf.io,njantrania/osf.io,brandonPurvis/osf.io,njantrania/osf.io,KAsante95/osf.io,CenterForOpenScience/osf.io,lyndsysimon/osf.io,brandonPurvis/osf.io,emetsger/osf.io,zamattiac/osf.io,petermalcolm/osf.io,kwierman/osf.io,jeffreyliu3230/osf.io,GageGaskins/osf.io,caneruguz/osf.io,DanielSBrown/osf.io,monikagrabowska/osf.io,acshi/osf.io,RomanZWang/osf.io,sbt9uc/osf.io,crcresearch/osf.io,felliott/osf.io,jinluyuan/osf.io,TomHeatwole/osf.io,cldershem/osf.io,zamattiac/osf.io,zkraime/osf.io,brandonPurvis/osf.io,RomanZWang/osf.io,aaxelb/osf.io,abought/osf.io,cslzchen/osf.io,rdhyee/osf.io,jinluyuan/osf.io,HarryRybacki/osf.io,jinluyuan/osf.io,caseyrollins/osf.io,danielneis/osf.io,leb2dg/osf.io,himanshuo/osf.io,amyshi188/osf.io,TomHeatwole/osf.io,amyshi188/osf.io,billyhunt/osf.io,emetsger/osf.io,RomanZWang/osf.io,dplorimer/osf,kc
h8qx/osf.io,brianjgeiger/osf.io,cosenal/osf.io,Ghalko/osf.io,zkraime/osf.io,aaxelb/osf.io,abought/osf.io,cslzchen/osf.io,hmoco/osf.io,acshi/osf.io,mluo613/osf.io,monikagrabowska/osf.io,abought/osf.io,bdyetton/prettychart,mattclark/osf.io,brianjgeiger/osf.io,GaryKriebel/osf.io,GaryKriebel/osf.io,lamdnhan/osf.io,njantrania/osf.io,Ghalko/osf.io,zkraime/osf.io,rdhyee/osf.io,cwisecarver/osf.io,zachjanicki/osf.io,zachjanicki/osf.io,asanfilippo7/osf.io,baylee-d/osf.io,jmcarp/osf.io,petermalcolm/osf.io,SSJohns/osf.io,haoyuchen1992/osf.io,caseyrygt/osf.io,fabianvf/osf.io,reinaH/osf.io,HalcyonChimera/osf.io,adlius/osf.io,doublebits/osf.io,jeffreyliu3230/osf.io,GageGaskins/osf.io,jeffreyliu3230/osf.io,mfraezz/osf.io,crcresearch/osf.io,monikagrabowska/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,mattclark/osf.io,emetsger/osf.io,samchrisinger/osf.io,revanthkolli/osf.io,samanehsan/osf.io,asanfilippo7/osf.io,baylee-d/osf.io,mluke93/osf.io,KAsante95/osf.io,kwierman/osf.io,jolene-esposito/osf.io,saradbowman/osf.io,kwierman/osf.io,Johnetordoff/osf.io,himanshuo/osf.io,alexschiller/osf.io,alexschiller/osf.io,acshi/osf.io,barbour-em/osf.io,kushG/osf.io,ckc6cz/osf.io,mfraezz/osf.io,icereval/osf.io,icereval/osf.io,Johnetordoff/osf.io,felliott/osf.io,cslzchen/osf.io,aaxelb/osf.io,chennan47/osf.io,GaryKriebel/osf.io,laurenrevere/osf.io,caneruguz/osf.io,DanielSBrown/osf.io,lyndsysimon/osf.io,acshi/osf.io,kch8qx/osf.io,emetsger/osf.io,fabianvf/osf.io,binoculars/osf.io,ZobairAlijan/osf.io,amyshi188/osf.io,arpitar/osf.io,Ghalko/osf.io,CenterForOpenScience/osf.io,saradbowman/osf.io,doublebits/osf.io,wearpants/osf.io,mluke93/osf.io,asanfilippo7/osf.io,Nesiehr/osf.io,felliott/osf.io,haoyuchen1992/osf.io,jolene-esposito/osf.io,rdhyee/osf.io,arpitar/osf.io,caseyrollins/osf.io,GageGaskins/osf.io,cwisecarver/osf.io,chrisseto/osf.io,lamdnhan/osf.io,zamattiac/osf.io,himanshuo/osf.io,hmoco/osf.io,HarryRybacki/osf.io,mluke93/osf.io,hmoco/osf.io,zachjanicki/osf.io,mluo613/osf.io,cldershem/osf.io,kwierman/osf.io,erinspace/osf.io,lyndsysimon/osf.io,HarryRybacki/osf.io,leb2dg/osf.io,ckc6cz/osf.io,jnayak1/osf.io,ZobairAlijan/osf.io,doublebits/osf.io,petermalcolm/osf.io,ckc6cz/osf.io,jmcarp/osf.io,samanehsan/osf.io,cwisecarver/osf.io,samanehsan/osf.io,HalcyonChimera/osf.io,zachjanicki/osf.io,doublebits/osf.io,sloria/osf.io,erinspace/osf.io,brianjgeiger/osf.io,HalcyonChimera/osf.io
|
Add very basic tests for OAuth
|
from nose.tools import *
from website.oauth.models import ExternalProvider
from website.oauth.models import OAUTH1
from website.oauth.models import OAUTH2
from tests.base import OsfTestCase
class MockOauth2Provider(ExternalProvider):
name = "Mock OAuth 2.0 Provider"
short_name = "mock2"
client_id = 'mock2_client_id'
client_secret = 'mock2_client_secret'
auth_url_base = 'http://mock2.com/auth'
callback_url = 'http://mock2.com/callback'
def handle_callback(self, data):
pass
class TestExternalProvider(OsfTestCase):
def test_instantiate(self):
mock = MockOauth2Provider()
def test_oauth_version_default(self):
mock = MockOauth2Provider()
assert_is(mock._oauth_version, OAUTH2)
|
<commit_before><commit_msg>Add very basic tests for OAuth<commit_after>
|
from nose.tools import *
from website.oauth.models import ExternalProvider
from website.oauth.models import OAUTH1
from website.oauth.models import OAUTH2
from tests.base import OsfTestCase
class MockOauth2Provider(ExternalProvider):
name = "Mock OAuth 2.0 Provider"
short_name = "mock2"
client_id = 'mock2_client_id'
client_secret = 'mock2_client_secret'
auth_url_base = 'http://mock2.com/auth'
callback_url = 'http://mock2.com/callback'
def handle_callback(self, data):
pass
class TestExternalProvider(OsfTestCase):
def test_instantiate(self):
mock = MockOauth2Provider()
def test_oauth_version_default(self):
mock = MockOauth2Provider()
assert_is(mock._oauth_version, OAUTH2)
|
Add very basic tests for OAuthfrom nose.tools import *
from website.oauth.models import ExternalProvider
from website.oauth.models import OAUTH1
from website.oauth.models import OAUTH2
from tests.base import OsfTestCase
class MockOauth2Provider(ExternalProvider):
name = "Mock OAuth 2.0 Provider"
short_name = "mock2"
client_id = 'mock2_client_id'
client_secret = 'mock2_client_secret'
auth_url_base = 'http://mock2.com/auth'
callback_url = 'http://mock2.com/callback'
def handle_callback(self, data):
pass
class TestExternalProvider(OsfTestCase):
def test_instantiate(self):
mock = MockOauth2Provider()
def test_oauth_version_default(self):
mock = MockOauth2Provider()
assert_is(mock._oauth_version, OAUTH2)
|
<commit_before><commit_msg>Add very basic tests for OAuth<commit_after>from nose.tools import *
from website.oauth.models import ExternalProvider
from website.oauth.models import OAUTH1
from website.oauth.models import OAUTH2
from tests.base import OsfTestCase
class MockOauth2Provider(ExternalProvider):
name = "Mock OAuth 2.0 Provider"
short_name = "mock2"
client_id = 'mock2_client_id'
client_secret = 'mock2_client_secret'
auth_url_base = 'http://mock2.com/auth'
callback_url = 'http://mock2.com/callback'
def handle_callback(self, data):
pass
class TestExternalProvider(OsfTestCase):
def test_instantiate(self):
mock = MockOauth2Provider()
def test_oauth_version_default(self):
mock = MockOauth2Provider()
assert_is(mock._oauth_version, OAUTH2)
|
|
443c35be358abc35394a9b3bbde2a79289db818e
|
tests/test_utils.py
|
tests/test_utils.py
|
# -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2016 Rémi Duraffort
# This file is part of ReactOBus.
#
# ReactOBus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ReactOBus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with ReactOBus. If not, see <http://www.gnu.org/licenses/>
from ReactOBus.utils import Pipe
import pytest
class PipeTest(Pipe):
classname = "PipeTest"
def __init__(self, name, options, inbound):
pass
class PipeSubTest(PipeTest):
classname = "PipeSubTest"
def test_pipe():
p = Pipe.select("PipeTest", "testing", {}, '')
assert isinstance(p, PipeTest)
p = Pipe.select("PipeSubTest", "testing", {}, '')
assert isinstance(p, PipeSubTest)
with pytest.raises(NotImplementedError):
Pipe.select("TestClass", "test", {}, '')
p = Pipe()
with pytest.raises(NotImplementedError):
p.setup()
with pytest.raises(NotImplementedError):
p.run()
|
Test the Pipe base class
|
Test the Pipe base class
|
Python
|
agpl-3.0
|
ivoire/ReactOBus,ivoire/ReactOBus
|
Test the Pipe base class
|
# -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2016 Rémi Duraffort
# This file is part of ReactOBus.
#
# ReactOBus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ReactOBus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with ReactOBus. If not, see <http://www.gnu.org/licenses/>
from ReactOBus.utils import Pipe
import pytest
class PipeTest(Pipe):
classname = "PipeTest"
def __init__(self, name, options, inbound):
pass
class PipeSubTest(PipeTest):
classname = "PipeSubTest"
def test_pipe():
p = Pipe.select("PipeTest", "testing", {}, '')
assert isinstance(p, PipeTest)
p = Pipe.select("PipeSubTest", "testing", {}, '')
assert isinstance(p, PipeSubTest)
with pytest.raises(NotImplementedError):
Pipe.select("TestClass", "test", {}, '')
p = Pipe()
with pytest.raises(NotImplementedError):
p.setup()
with pytest.raises(NotImplementedError):
p.run()
|
<commit_before><commit_msg>Test the Pipe base class<commit_after>
|
# -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2016 Rémi Duraffort
# This file is part of ReactOBus.
#
# ReactOBus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ReactOBus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with ReactOBus. If not, see <http://www.gnu.org/licenses/>
from ReactOBus.utils import Pipe
import pytest
class PipeTest(Pipe):
classname = "PipeTest"
def __init__(self, name, options, inbound):
pass
class PipeSubTest(PipeTest):
classname = "PipeSubTest"
def test_pipe():
p = Pipe.select("PipeTest", "testing", {}, '')
assert isinstance(p, PipeTest)
p = Pipe.select("PipeSubTest", "testing", {}, '')
assert isinstance(p, PipeSubTest)
with pytest.raises(NotImplementedError):
Pipe.select("TestClass", "test", {}, '')
p = Pipe()
with pytest.raises(NotImplementedError):
p.setup()
with pytest.raises(NotImplementedError):
p.run()
|
Test the Pipe base class# -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2016 Rémi Duraffort
# This file is part of ReactOBus.
#
# ReactOBus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ReactOBus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with ReactOBus. If not, see <http://www.gnu.org/licenses/>
from ReactOBus.utils import Pipe
import pytest
class PipeTest(Pipe):
classname = "PipeTest"
def __init__(self, name, options, inbound):
pass
class PipeSubTest(PipeTest):
classname = "PipeSubTest"
def test_pipe():
p = Pipe.select("PipeTest", "testing", {}, '')
assert isinstance(p, PipeTest)
p = Pipe.select("PipeSubTest", "testing", {}, '')
assert isinstance(p, PipeSubTest)
with pytest.raises(NotImplementedError):
Pipe.select("TestClass", "test", {}, '')
p = Pipe()
with pytest.raises(NotImplementedError):
p.setup()
with pytest.raises(NotImplementedError):
p.run()
|
<commit_before><commit_msg>Test the Pipe base class<commit_after># -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2016 Rémi Duraffort
# This file is part of ReactOBus.
#
# ReactOBus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ReactOBus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with ReactOBus. If not, see <http://www.gnu.org/licenses/>
from ReactOBus.utils import Pipe
import pytest
class PipeTest(Pipe):
classname = "PipeTest"
def __init__(self, name, options, inbound):
pass
class PipeSubTest(PipeTest):
classname = "PipeSubTest"
def test_pipe():
p = Pipe.select("PipeTest", "testing", {}, '')
assert isinstance(p, PipeTest)
p = Pipe.select("PipeSubTest", "testing", {}, '')
assert isinstance(p, PipeSubTest)
with pytest.raises(NotImplementedError):
Pipe.select("TestClass", "test", {}, '')
p = Pipe()
with pytest.raises(NotImplementedError):
p.setup()
with pytest.raises(NotImplementedError):
p.run()
|
|
bda9c236c978601ea8c62c60488afaecc03f384f
|
test/test_relative_relationships.py
|
test/test_relative_relationships.py
|
from neomodel import StructuredNode, RelationshipTo, StringProperty
from .test_relationships import Country
class Cat(StructuredNode):
name = StringProperty()
is_from = RelationshipTo('.test_relationships.Country', 'IS_FROM')
def test_relative_relationship():
a = Cat(name='snufkin').save()
assert a
c = Country(code='MG').save()
assert c
a.is_from.connect(c)
assert a.is_from.is_connected(c)
|
Add test for relative imports
|
Add test for relative imports
|
Python
|
mit
|
pombredanne/neomodel,fpieper/neomodel,bleib1dj/neomodel,cristigociu/neomodel_dh,robinedwards/neomodel,wcooley/neomodel,andrefsp/neomodel,robinedwards/neomodel,bleib1dj/neomodel
|
Add test for relative imports
|
from neomodel import StructuredNode, RelationshipTo, StringProperty
from .test_relationships import Country
class Cat(StructuredNode):
name = StringProperty()
is_from = RelationshipTo('.test_relationships.Country', 'IS_FROM')
def test_relative_relationship():
a = Cat(name='snufkin').save()
assert a
c = Country(code='MG').save()
assert c
a.is_from.connect(c)
assert a.is_from.is_connected(c)
|
<commit_before><commit_msg>Add test for relative imports<commit_after>
|
from neomodel import StructuredNode, RelationshipTo, StringProperty
from .test_relationships import Country
class Cat(StructuredNode):
name = StringProperty()
is_from = RelationshipTo('.test_relationships.Country', 'IS_FROM')
def test_relative_relationship():
a = Cat(name='snufkin').save()
assert a
c = Country(code='MG').save()
assert c
a.is_from.connect(c)
assert a.is_from.is_connected(c)
|
Add test for relative importsfrom neomodel import StructuredNode, RelationshipTo, StringProperty
from .test_relationships import Country
class Cat(StructuredNode):
name = StringProperty()
is_from = RelationshipTo('.test_relationships.Country', 'IS_FROM')
def test_relative_relationship():
a = Cat(name='snufkin').save()
assert a
c = Country(code='MG').save()
assert c
a.is_from.connect(c)
assert a.is_from.is_connected(c)
|
<commit_before><commit_msg>Add test for relative imports<commit_after>from neomodel import StructuredNode, RelationshipTo, StringProperty
from .test_relationships import Country
class Cat(StructuredNode):
name = StringProperty()
is_from = RelationshipTo('.test_relationships.Country', 'IS_FROM')
def test_relative_relationship():
a = Cat(name='snufkin').save()
assert a
c = Country(code='MG').save()
assert c
a.is_from.connect(c)
assert a.is_from.is_connected(c)
|
|
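Side note on the pattern being tested: the leading dot in '.test_relationships.Country' makes neomodel resolve the class relative to the current module, which avoids a circular import. When no import cycle is involved, the class object can be passed directly; a minimal equivalent sketch:

# Equivalent definition passing the class object instead of a relative path
# (only possible when Country can be imported without a cycle).
class Cat(StructuredNode):
    name = StringProperty()
    is_from = RelationshipTo(Country, 'IS_FROM')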
b17b0f82b94aaba1f37b68d0660873a3c4329f8b
|
tests/test_inventory/test_models.py
|
tests/test_inventory/test_models.py
|
import pytest
from labsys.inventory.models import (
Product, StockProduct, Specification, OrderItem, Order, Transaction
)
'''
TODO: Tests
Product
- must know how many there are in stock
- must retrieve its associated specifications
'''
|
Add dummy models test file
|
:memo: Add dummy models test file
|
Python
|
mit
|
gems-uff/labsys,gems-uff/labsys,gems-uff/labsys
|
:memo: Add dummy models test file
|
import pytest
from labsys.inventory.models import (
Product, StockProduct, Specification, OrderItem, Order, Transaction
)
'''
TODO: Tests
Product
- must know how many there are in stock
- must retrieve its associated specifications
'''
|
<commit_before><commit_msg>:memo: Add dummy models test file<commit_after>
|
import pytest
from labsys.inventory.models import (
Product, StockProduct, Specification, OrderItem, Order, Transaction
)
'''
TODO: Tests
Product
- must know how many there are in stock
- must retrieve its associated specifications
'''
|
:memo: Add dummy models test fileimport pytest
from labsys.inventory.models import (
Product, StockProduct, Specification, OrderItem, Order, Transaction
)
'''
TODO: Tests
Product
- must know how many there are in stock
- must retrieve its associated specifications
'''
|
<commit_before><commit_msg>:memo: Add dummy models test file<commit_after>import pytest
from labsys.inventory.models import (
Product, StockProduct, Specification, OrderItem, Order, Transaction
)
'''
TODO: Tests
Product
- must know how many there are in stock
- must retrieve its associated specifications
'''
|
|
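One of the TODO items above, sketched as a concrete test; the helper name here is an assumption, since the commit deliberately leaves the tests unwritten:

# Hypothetical sketch of the first TODO test; count_in_stock() is an assumed
# helper, not an API confirmed by this commit.
def test_product_knows_stock_count():
    product = Product(name='reagent')
    assert product.count_in_stock() == 0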
5f385913ab06fc288c61d22d98f2f9a903194f8f
|
data_structures/Stack/Python/Stack.py
|
data_structures/Stack/Python/Stack.py
|
# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
|
# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)
|
Add push method and implementation
|
Add push method and implementation
|
Python
|
cc0-1.0
|
manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,Cnidarias/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,EUNIX-TRIX/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,EUNIX-TRIX/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,Deepak345/al-go-rithms,EUNIX-TRIX/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms
|
# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop() Add push method and implementation
|
# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)
|
<commit_before># Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop() <commit_msg>Add push method and implementation<commit_after>
|
# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)
|
# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop() Add push method and implementation# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)
|
<commit_before># Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop() <commit_msg>Add push method and implementation<commit_after># Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)
|
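A quick usage sketch for the Stack above (hypothetical, not part of the commit). Note that Python lists expose len() rather than a .length attribute, which is why the emptiness check reads len(self.stack) > 0:

# Hypothetical usage of the Stack class defined above.
s = Stack()
s.push(1)
s.push(2)
assert s.pop() == 2      # LIFO: the last element pushed comes off first
assert s.pop() == 1
assert s.pop() is None   # pop() on an empty stack falls through and returns None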
552d88f497557bc449d98b7828b47dbba2ab0150
|
insertFrontMatter.py
|
insertFrontMatter.py
|
#!/usr/bin/python
import sys, os
def main(argv):
folder = argv[1]
project = argv[2]
# for each file in specified directory
for filename in os.listdir(folder):
        # open a file and create a temp file to hold our alteration
        oldFile = open(os.path.join(folder, filename))
        tmpFile = open("conversionFile.tmp", "w+")
        # allow us to keep track of the old front matter we don't want to copy
        startFrontMatter = False
        endFrontMatter = False
        # saves relevant front matter from the old file to be used in the new file
        frontMatter = {"title": "", "description": "", "version": "", "weight": ""}
        # for each line in the old file
        for line in oldFile:
            # only write if we have established we're past the front matter
            if (startFrontMatter and endFrontMatter):
                tmpFile.write(line)
            # save the previous front matter to a dictionary
            elif (startFrontMatter):
                # lines keep their trailing newline, so strip before comparing
                if (line.strip() == "---"):
                    endFrontMatter = True
                if (line.find("title: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["title"] = tmp[1]
                elif (line.find("description: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["description"] = tmp[1]
                elif (line.find("version: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["version"] = tmp[1]
                elif (line.find("weight: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["weight"] = tmp[1]
            # check the first line of the file
            elif (line.strip() == "---"):
                startFrontMatter = True
# close the old file since we're done copying the content
oldFile.close()
# TODO:
# os.system("hugo new " + project +
# hugo new stuff
# os.system("cat " + oldFile + " >> " + newFile)
# fill in any blanks in the new hugo file front matter
if __name__ == "__main__":
if (len(sys.argv) == 3):
main(sys.argv)
else:
print "Please supply the following arg(s):"
print "1: The folder which you'd like to migrate"
print "2: The name of the project it belongs to"
sys.stdout.flush()
|
Add initial untested and unfinished front matter adding/converting script
|
Add initial untested and unfinished front matter adding/converting script
|
Python
|
mit
|
sensu/sensu-docs,sensu/sensu-docs,sensu/sensu-docs,sensu/sensu-docs,sensu/sensu-docs
|
Add initial untested and unfinished front matter adding/converting script
|
#!/usr/bin/python
import sys, os
def main(argv):
folder = argv[1]
project = argv[2]
# for each file in specified directory
for filename in os.listdir(folder):
        # open a file and create a temp file to hold our alteration
        oldFile = open(os.path.join(folder, filename))
        tmpFile = open("conversionFile.tmp", "w+")
        # allow us to keep track of the old front matter we don't want to copy
        startFrontMatter = False
        endFrontMatter = False
        # saves relevant front matter from the old file to be used in the new file
        frontMatter = {"title": "", "description": "", "version": "", "weight": ""}
        # for each line in the old file
        for line in oldFile:
            # only write if we have established we're past the front matter
            if (startFrontMatter and endFrontMatter):
                tmpFile.write(line)
            # save the previous front matter to a dictionary
            elif (startFrontMatter):
                # lines keep their trailing newline, so strip before comparing
                if (line.strip() == "---"):
                    endFrontMatter = True
                if (line.find("title: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["title"] = tmp[1]
                elif (line.find("description: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["description"] = tmp[1]
                elif (line.find("version: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["version"] = tmp[1]
                elif (line.find("weight: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["weight"] = tmp[1]
            # check the first line of the file
            elif (line.strip() == "---"):
                startFrontMatter = True
# close the old file since we're done copying the content
oldFile.close()
# TODO:
# os.system("hugo new " + project +
# hugo new stuff
# os.system("cat " + oldFile + " >> " + newFile)
# fill in any blanks in the new hugo file front matter
if __name__ == "__main__":
if (len(sys.argv) == 3):
main(sys.argv)
else:
print "Please supply the following arg(s):"
print "1: The folder which you'd like to migrate"
print "2: The name of the project it belongs to"
sys.stdout.flush()
|
<commit_before><commit_msg>Add initial untested and unfinished front matter adding/converting script<commit_after>
|
#!/usr/bin/python
import sys, os
def main(argv):
folder = argv[1]
project = argv[2]
# for each file in specified directory
for filename in os.listdir(folder):
        # open a file and create a temp file to hold our alteration
        oldFile = open(os.path.join(folder, filename))
        tmpFile = open("conversionFile.tmp", "w+")
        # allow us to keep track of the old front matter we don't want to copy
        startFrontMatter = False
        endFrontMatter = False
        # saves relevant front matter from the old file to be used in the new file
        frontMatter = {"title": "", "description": "", "version": "", "weight": ""}
        # for each line in the old file
        for line in oldFile:
            # only write if we have established we're past the front matter
            if (startFrontMatter and endFrontMatter):
                tmpFile.write(line)
            # save the previous front matter to a dictionary
            elif (startFrontMatter):
                # lines keep their trailing newline, so strip before comparing
                if (line.strip() == "---"):
                    endFrontMatter = True
                if (line.find("title: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["title"] = tmp[1]
                elif (line.find("description: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["description"] = tmp[1]
                elif (line.find("version: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["version"] = tmp[1]
                elif (line.find("weight: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["weight"] = tmp[1]
            # check the first line of the file
            elif (line.strip() == "---"):
                startFrontMatter = True
# close the old file since we're done copying the content
oldFile.close()
# TODO:
# os.system("hugo new " + project +
# hugo new stuff
# os.system("cat " + oldFile + " >> " + newFile)
# fill in any blanks in the new hugo file front matter
if __name__ == "__main__":
if (len(sys.argv) == 3):
main(sys.argv)
else:
print "Please supply the following arg(s):"
print "1: The folder which you'd like to migrate"
print "2: The name of the project it belongs to"
sys.stdout.flush()
|
Add initial untested and unfinished front matter adding/converting script#!/usr/bin/python
import sys, os
def main(argv):
folder = argv[1]
project = argv[2]
# for each file in specified directory
for filename in os.listdir(folder):
        # open a file and create a temp file to hold our alteration
        oldFile = open(os.path.join(folder, filename))
        tmpFile = open("conversionFile.tmp", "w+")
        # allow us to keep track of the old front matter we don't want to copy
        startFrontMatter = False
        endFrontMatter = False
        # saves relevant front matter from the old file to be used in the new file
        frontMatter = {"title": "", "description": "", "version": "", "weight": ""}
        # for each line in the old file
        for line in oldFile:
            # only write if we have established we're past the front matter
            if (startFrontMatter and endFrontMatter):
                tmpFile.write(line)
            # save the previous front matter to a dictionary
            elif (startFrontMatter):
                # lines keep their trailing newline, so strip before comparing
                if (line.strip() == "---"):
                    endFrontMatter = True
                if (line.find("title: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["title"] = tmp[1]
                elif (line.find("description: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["description"] = tmp[1]
                elif (line.find("version: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["version"] = tmp[1]
                elif (line.find("weight: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["weight"] = tmp[1]
            # check the first line of the file
            elif (line.strip() == "---"):
                startFrontMatter = True
# close the old file since we're done copying the content
oldFile.close()
# TODO:
# os.system("hugo new " + project +
# hugo new stuff
# os.system("cat " + oldFile + " >> " + newFile)
# fill in any blanks in the new hugo file front matter
if __name__ == "__main__":
if (len(sys.argv) == 3):
main(sys.argv)
else:
print "Please supply the following arg(s):"
print "1: The folder which you'd like to migrate"
print "2: The name of the project it belongs to"
sys.stdout.flush()
|
<commit_before><commit_msg>Add initial untested and unfinished front matter adding/converting script<commit_after>#!/usr/bin/python
import sys, os
def main(argv):
folder = argv[1]
project = argv[2]
# for each file in specified directory
for filename in os.listdir(folder):
        # open a file and create a temp file to hold our alteration
        oldFile = open(os.path.join(folder, filename))
        tmpFile = open("conversionFile.tmp", "w+")
        # allow us to keep track of the old front matter we don't want to copy
        startFrontMatter = False
        endFrontMatter = False
        # saves relevant front matter from the old file to be used in the new file
        frontMatter = {"title": "", "description": "", "version": "", "weight": ""}
        # for each line in the old file
        for line in oldFile:
            # only write if we have established we're past the front matter
            if (startFrontMatter and endFrontMatter):
                tmpFile.write(line)
            # save the previous front matter to a dictionary
            elif (startFrontMatter):
                # lines keep their trailing newline, so strip before comparing
                if (line.strip() == "---"):
                    endFrontMatter = True
                if (line.find("title: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["title"] = tmp[1]
                elif (line.find("description: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["description"] = tmp[1]
                elif (line.find("version: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["version"] = tmp[1]
                elif (line.find("weight: ") != -1):
                    tmp = line.split(": ")
                    frontMatter["weight"] = tmp[1]
            # check the first line of the file
            elif (line.strip() == "---"):
                startFrontMatter = True
# close the old file since we're done copying the content
oldFile.close()
# TODO:
# os.system("hugo new " + project +
# hugo new stuff
# os.system("cat " + oldFile + " >> " + newFile)
# fill in any blanks in the new hugo file front matter
if __name__ == "__main__":
if (len(sys.argv) == 3):
main(sys.argv)
else:
print "Please supply the following arg(s):"
print "1: The folder which you'd like to migrate"
print "2: The name of the project it belongs to"
sys.stdout.flush()
|
|
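A minimal sketch of the same front-matter split written as a standalone function, assuming the usual pair of --- delimiters; the names here are illustrative, not part of the commit:

# Illustrative sketch: split lines into front matter and body at the --- markers.
def split_front_matter(lines):
    front, body, dashes = [], [], 0
    for line in lines:
        if line.strip() == "---" and dashes < 2:
            dashes += 1          # first and second delimiter
        elif dashes == 1:
            front.append(line)   # between the two delimiters
        else:
            body.append(line)    # before the first or after the second
    return front, body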
792072f6e95101395f49f62ca6276eb287cbaf30
|
proxy.py
|
proxy.py
|
import sys
import socket
import threading
def server_loop(local_host, local_port, remote_host, remote_port,
receive_first):
"""
The proxy is represented by the local host and port. The remote host and
port represent the service's server.
"""
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.bind((local_host, local_port)) # argument is a tuple
except:
        # I recommend using port 3000 or greater
print "[!!] Failed to listen on %s:%d" % (local_host, local_port)
print "[!!] Check for other listening sockets or correct permissions."
sys.exit(0)
print "[*] Listening on %s:%d" % (local_host, local_port)
server.listen(5)
while(True):
client_socket, addr = server.accept()
# print out info of the local socket assigned
print "[==>] Received incoming connection from %s:%d" % (
addr[0], addr[1])
# start a thread to talk to the remote host
proxy_thread = threading.Thread(
target=proxy_handler,
args=(client_socket, remote_host, remote_port, receive_first))
# check above that proxy_handler is a function defined before this call
# to threading.Thread
proxy_thread.start()
|
Add the main server loop
|
Add the main server loop
|
Python
|
mit
|
inakidelamadrid/bhp_exercises
|
Add the main server loop
|
import sys
import socket
import threading
def server_loop(local_host, local_port, remote_host, remote_port,
receive_first):
"""
The proxy is represented by the local host and port. The remote host and
port represent the service's server.
"""
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.bind((local_host, local_port)) # argument is a tuple
except:
        # I recommend using port 3000 or greater
print "[!!] Failed to listen on %s:%d" % (local_host, local_port)
print "[!!] Check for other listening sockets or correct permissions."
sys.exit(0)
print "[*] Listening on %s:%d" % (local_host, local_port)
server.listen(5)
while(True):
client_socket, addr = server.accept()
# print out info of the local socket assigned
print "[==>] Received incoming connection from %s:%d" % (
addr[0], addr[1])
# start a thread to talk to the remote host
proxy_thread = threading.Thread(
target=proxy_handler,
args=(client_socket, remote_host, remote_port, receive_first))
# check above that proxy_handler is a function defined before this call
# to threading.Thread
proxy_thread.start()
|
<commit_before><commit_msg>Add the main server loop<commit_after>
|
import sys
import socket
import threading
def server_loop(local_host, local_port, remote_host, remote_port,
receive_first):
"""
The proxy is represented by the local host and port. The remote host and
port represent the service's server.
"""
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.bind((local_host, local_port)) # argument is a tuple
except:
        # I recommend using port 3000 or greater
print "[!!] Failed to listen on %s:%d" % (local_host, local_port)
print "[!!] Check for other listening sockets or correct permissions."
sys.exit(0)
print "[*] Listening on %s:%d" % (local_host, local_port)
server.listen(5)
while(True):
client_socket, addr = server.accept()
# print out info of the local socket assigned
print "[==>] Received incoming connection from %s:%d" % (
addr[0], addr[1])
# start a thread to talk to the remote host
proxy_thread = threading.Thread(
target=proxy_handler,
args=(client_socket, remote_host, remote_port, receive_first))
# check above that proxy_handler is a function defined before this call
# to threading.Thread
proxy_thread.start()
|
Add the main server loopimport sys
import socket
import threading
def server_loop(local_host, local_port, remote_host, remote_port,
receive_first):
"""
The proxy is represented by the local host and port. The remote host and
port represent the service's server.
"""
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.bind((local_host, local_port)) # argument is a tuple
except:
        # I recommend using port 3000 or greater
print "[!!] Failed to listen on %s:%d" % (local_host, local_port)
print "[!!] Check for other listening sockets or correct permissions."
sys.exit(0)
print "[*] Listening on %s:%d" % (local_host, local_port)
server.listen(5)
while(True):
client_socket, addr = server.accept()
# print out info of the local socket assigned
print "[==>] Received incoming connection from %s:%d" % (
addr[0], addr[1])
# start a thread to talk to the remote host
proxy_thread = threading.Thread(
target=proxy_handler,
args=(client_socket, remote_host, remote_port, receive_first))
# check above that proxy_handler is a function defined before this call
# to threading.Thread
proxy_thread.start()
|
<commit_before><commit_msg>Add the main server loop<commit_after>import sys
import socket
import threading
def server_loop(local_host, local_port, remote_host, remote_port,
receive_first):
"""
The proxy is represented by the local host and port. The remote host and
port represent the service's server.
"""
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
server.bind((local_host, local_port)) # argument is a tuple
except:
        # I recommend using port 3000 or greater
print "[!!] Failed to listen on %s:%d" % (local_host, local_port)
print "[!!] Check for other listening sockets or correct permissions."
sys.exit(0)
print "[*] Listening on %s:%d" % (local_host, local_port)
server.listen(5)
while(True):
client_socket, addr = server.accept()
# print out info of the local socket assigned
print "[==>] Received incoming connection from %s:%d" % (
addr[0], addr[1])
# start a thread to talk to the remote host
proxy_thread = threading.Thread(
target=proxy_handler,
args=(client_socket, remote_host, remote_port, receive_first))
# check above that proxy_handler is a function defined before this call
# to threading.Thread
proxy_thread.start()
|
|
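The loop above hands each connection to proxy_handler, which this commit does not yet define; a minimal sketch of what such a handler could look like, reusing the imports above (an assumption about the eventual implementation, not code from the commit):

# Hypothetical proxy_handler sketch; the commit itself leaves this undefined.
def proxy_handler(client_socket, remote_host, remote_port, receive_first):
    remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    remote_socket.connect((remote_host, remote_port))
    if receive_first:
        # some protocols (e.g. FTP banners) speak first; relay that to the client
        client_socket.send(remote_socket.recv(4096))
    while True:
        data = client_socket.recv(4096)
        if not data:
            break
        remote_socket.send(data)                      # client -> server
        client_socket.send(remote_socket.recv(4096))  # server -> client
    client_socket.close()
    remote_socket.close()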
906741c38d074fec9d304a74c54937db0d569b95
|
directinotidy.py
|
directinotidy.py
|
from Queue import Queue
from thread import start_new_thread
import inotify.adapters
import filesorter
import dmcutils
import os
__author__ = 'falko'
filesfifo = Queue()
fsort = filesorter
def processQueue():
while True:
newFile = filesfifo.get()
fsort.processFile(newFile)
def startinotify():
i = inotify.adapters.InotifyTree(dmcutils.commandarg.inputFolder)
for event in i.event_gen():
if event is not None:
(header, type_names, watch_path, filename) = event
if "IN_CLOSE_WRITE" in type_names:
filesfifo.put(os.path.join(watch_path,filename) )
if __name__ == "__main__":
dmcutils.init()
fsort.init(dmcutils.commandarg)
start_new_thread(processQueue, ())
startinotify()
|
Add inotify alternative to watchdog
|
Add inotify alternative to watchdog
Watchdog was not working as expected when used in a networking environment.
Inotify is used in directinotify.py to watch a directory for IN_CLOSE_WRITE
|
Python
|
mit
|
HaBaLeS/digital-mess-cleaner
|
Add inotify alternative to watchdog
Watchdog was not working as expected when used in a networking environment.
Inotify is used in directinotify.py to watch a directory for IN_CLOSE_WRITE
|
from Queue import Queue
from thread import start_new_thread
import inotify.adapters
import filesorter
import dmcutils
import os
__author__ = 'falko'
filesfifo = Queue()
fsort = filesorter
def processQueue():
while True:
newFile = filesfifo.get()
fsort.processFile(newFile)
def startinotify():
i = inotify.adapters.InotifyTree(dmcutils.commandarg.inputFolder)
for event in i.event_gen():
if event is not None:
(header, type_names, watch_path, filename) = event
if "IN_CLOSE_WRITE" in type_names:
filesfifo.put(os.path.join(watch_path,filename) )
if __name__ == "__main__":
dmcutils.init()
fsort.init(dmcutils.commandarg)
start_new_thread(processQueue, ())
startinotify()
|
<commit_before><commit_msg>Add inotify alternative to watchdog
Watchdog was not working as expected when used in a networking environment.
Inotify is used in directinotify.py to watch a directory for IN_CLOSE_WRITE<commit_after>
|
from Queue import Queue
from thread import start_new_thread
import inotify.adapters
import filesorter
import dmcutils
import os
__author__ = 'falko'
filesfifo = Queue()
fsort = filesorter
def processQueue():
while True:
newFile = filesfifo.get()
fsort.processFile(newFile)
def startinotify():
i = inotify.adapters.InotifyTree(dmcutils.commandarg.inputFolder)
for event in i.event_gen():
if event is not None:
(header, type_names, watch_path, filename) = event
if "IN_CLOSE_WRITE" in type_names:
filesfifo.put(os.path.join(watch_path,filename) )
if __name__ == "__main__":
dmcutils.init()
fsort.init(dmcutils.commandarg)
start_new_thread(processQueue, ())
startinotify()
|
Add inotify alternative to watchdog
Watchdog was not working as expected when used in a networking environment.
Inotify is used in directinotify.py to watch a directory for IN_CLOSE_WRITEfrom Queue import Queue
from thread import start_new_thread
import inotify.adapters
import filesorter
import dmcutils
import os
__author__ = 'falko'
filesfifo = Queue()
fsort = filesorter
def processQueue():
while True:
newFile = filesfifo.get()
fsort.processFile(newFile)
def startinotify():
i = inotify.adapters.InotifyTree(dmcutils.commandarg.inputFolder)
for event in i.event_gen():
if event is not None:
(header, type_names, watch_path, filename) = event
if "IN_CLOSE_WRITE" in type_names:
filesfifo.put(os.path.join(watch_path,filename) )
if __name__ == "__main__":
dmcutils.init()
fsort.init(dmcutils.commandarg)
start_new_thread(processQueue, ())
startinotify()
|
<commit_before><commit_msg>Add inotify alternative to watchdog
Watchdog was not working as expected when used in a networking environment.
Inotify is used in directinotify.py to watch a directory for IN_CLOSE_WRITE<commit_after>from Queue import Queue
from thread import start_new_thread
import inotify.adapters
import filesorter
import dmcutils
import os
__author__ = 'falko'
filesfifo = Queue()
fsort = filesorter
def processQueue():
while True:
newFile = filesfifo.get()
fsort.processFile(newFile)
def startinotify():
i = inotify.adapters.InotifyTree(dmcutils.commandarg.inputFolder)
for event in i.event_gen():
if event is not None:
(header, type_names, watch_path, filename) = event
if "IN_CLOSE_WRITE" in type_names:
filesfifo.put(os.path.join(watch_path,filename) )
if __name__ == "__main__":
dmcutils.init()
fsort.init(dmcutils.commandarg)
start_new_thread(processQueue, ())
startinotify()
|
|
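On Python 3 the same queue-plus-worker setup would use the renamed queue module and threading instead of the low-level thread module; a sketch assuming a straightforward port of the rest of the script:

# Python 3 flavoured sketch of the worker setup (assumes the rest is ported too).
import threading
from queue import Queue

filesfifo = Queue()
worker = threading.Thread(target=processQueue, daemon=True)  # exits with main thread
worker.start()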
d55df3a30820b24e16beef1b5c824b8ef32b7d61
|
select_files_for_runlumidataset.py
|
select_files_for_runlumidataset.py
|
#!/usr/bin/env python
import argparse
from dbs.apis.dbsClient import DbsApi
_PREFIX = 'root://eoscms.cern.ch//eos/cms/'
def getFilesForQuery(args):
global _PREFIX
query = {
'dataset' : args.dataset,
'run_num': args.run,
'lumi_list': [l for l in range(args.ls_min, args.ls_max+1)]
}
api = DbsApi(url="https://cmsweb.cern.ch/dbs/prod/global/DBSReader")
files = api.listFiles(**query)
o = open("file_list_%d_LS%d-%d.txt" % (args.run, args.ls_min, args.ls_max), 'w')
o.write(' '.join([_PREFIX + f['logical_file_name'] for f in files]))
o.close()
def main():
    parser = argparse.ArgumentParser(description='Locally download the list of files for a specific (dataset,run,[ls]) combination')
parser.add_argument('-l', '--ls_min',
help='Minimum Lumisection to consider, inclusive',
default=1,
type=int,
required=True)
parser.add_argument('-m', '--ls_max',
help='Maximum Lumisection to consider, inclusive',
default=10,
type=int,
required=True)
parser.add_argument('-d', '--dataset',
help='Dataset from which to extract the list of files',
default='/ExpressPhysics/Run2016B-Express-v2/FEVT',
type=str,
required=True)
parser.add_argument('-r', '--run',
help='Run Number to consider',
type=int,
default=273158,
required=True)
args = parser.parse_args()
getFilesForQuery(args)
if __name__ == '__main__':
main()
|
Add a script to find files for DS/Run/LS
|
Add a script to find files for DS/Run/LS
|
Python
|
mit
|
rovere/utilities,rovere/utilities,rovere/utilities,rovere/utilities
|
Add a script to find files for DS/Run/LS
|
#!/usr/bin/env python
import argparse
from dbs.apis.dbsClient import DbsApi
_PREFIX = 'root://eoscms.cern.ch//eos/cms/'
def getFilesForQuery(args):
global _PREFIX
query = {
'dataset' : args.dataset,
'run_num': args.run,
'lumi_list': [l for l in range(args.ls_min, args.ls_max+1)]
}
api = DbsApi(url="https://cmsweb.cern.ch/dbs/prod/global/DBSReader")
files = api.listFiles(**query)
o = open("file_list_%d_LS%d-%d.txt" % (args.run, args.ls_min, args.ls_max), 'w')
o.write(' '.join([_PREFIX + f['logical_file_name'] for f in files]))
o.close()
def main():
    parser = argparse.ArgumentParser(description='Locally download the list of files for a specific (dataset,run,[ls]) combination')
parser.add_argument('-l', '--ls_min',
help='Minimum Lumisection to consider, inclusive',
default=1,
type=int,
required=True)
parser.add_argument('-m', '--ls_max',
help='Maximum Lumisection to consider, inclusive',
default=10,
type=int,
required=True)
parser.add_argument('-d', '--dataset',
help='Dataset from which to extract the list of files',
default='/ExpressPhysics/Run2016B-Express-v2/FEVT',
type=str,
required=True)
parser.add_argument('-r', '--run',
help='Run Number to consider',
type=int,
default=273158,
required=True)
args = parser.parse_args()
getFilesForQuery(args)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to find files for DS/Run/LS<commit_after>
|
#!/usr/bin/env python
import argparse
from dbs.apis.dbsClient import DbsApi
_PREFIX = 'root://eoscms.cern.ch//eos/cms/'
def getFilesForQuery(args):
global _PREFIX
query = {
'dataset' : args.dataset,
'run_num': args.run,
'lumi_list': [l for l in range(args.ls_min, args.ls_max+1)]
}
api = DbsApi(url="https://cmsweb.cern.ch/dbs/prod/global/DBSReader")
files = api.listFiles(**query)
o = open("file_list_%d_LS%d-%d.txt" % (args.run, args.ls_min, args.ls_max), 'w')
o.write(' '.join([_PREFIX + f['logical_file_name'] for f in files]))
o.close()
def main():
    parser = argparse.ArgumentParser(description='Locally download the list of files for a specific (dataset,run,[ls]) combination')
parser.add_argument('-l', '--ls_min',
help='Minimum Lumisection to consider, inclusive',
default=1,
type=int,
required=True)
parser.add_argument('-m', '--ls_max',
help='Maximum Lumisection to consider, inclusive',
default=10,
type=int,
required=True)
parser.add_argument('-d', '--dataset',
help='Dataset from which to extract the list of files',
default='/ExpressPhysics/Run2016B-Express-v2/FEVT',
type=str,
required=True)
parser.add_argument('-r', '--run',
help='Run Number to consider',
type=int,
default=273158,
required=True)
args = parser.parse_args()
getFilesForQuery(args)
if __name__ == '__main__':
main()
|
Add a script to find files for DS/Run/LS#!/usr/bin/env python
import argparse
from dbs.apis.dbsClient import DbsApi
_PREFIX = 'root://eoscms.cern.ch//eos/cms/'
def getFilesForQuery(args):
global _PREFIX
query = {
'dataset' : args.dataset,
'run_num': args.run,
'lumi_list': [l for l in range(args.ls_min, args.ls_max+1)]
}
api = DbsApi(url="https://cmsweb.cern.ch/dbs/prod/global/DBSReader")
files = api.listFiles(**query)
o = open("file_list_%d_LS%d-%d.txt" % (args.run, args.ls_min, args.ls_max), 'w')
o.write(' '.join([_PREFIX + f['logical_file_name'] for f in files]))
o.close()
def main():
    parser = argparse.ArgumentParser(description='Locally download the list of files for a specific (dataset,run,[ls]) combination')
parser.add_argument('-l', '--ls_min',
help='Minimum Lumisection to consider, inclusive',
default=1,
type=int,
required=True)
parser.add_argument('-m', '--ls_max',
help='Maximum Lumisection to consider, inclusive',
default=10,
type=int,
required=True)
parser.add_argument('-d', '--dataset',
help='Dataset from which to extract the list of files',
default='/ExpressPhysics/Run2016B-Express-v2/FEVT',
type=str,
required=True)
parser.add_argument('-r', '--run',
help='Run Number to consider',
type=int,
default=273158,
required=True)
args = parser.parse_args()
getFilesForQuery(args)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to find files for DS/Run/LS<commit_after>#!/usr/bin/env python
import argparse
from dbs.apis.dbsClient import DbsApi
_PREFIX = 'root://eoscms.cern.ch//eos/cms/'
def getFilesForQuery(args):
global _PREFIX
query = {
'dataset' : args.dataset,
'run_num': args.run,
'lumi_list': [l for l in range(args.ls_min, args.ls_max+1)]
}
api = DbsApi(url="https://cmsweb.cern.ch/dbs/prod/global/DBSReader")
files = api.listFiles(**query)
o = open("file_list_%d_LS%d-%d.txt" % (args.run, args.ls_min, args.ls_max), 'w')
o.write(' '.join([_PREFIX + f['logical_file_name'] for f in files]))
o.close()
def main():
    parser = argparse.ArgumentParser(description='Locally download the list of files for a specific (dataset,run,[ls]) combination')
parser.add_argument('-l', '--ls_min',
help='Minimum Lumisection to consider, inclusive',
default=1,
type=int,
required=True)
parser.add_argument('-m', '--ls_max',
help='Maximum Lumisection to consider, inclusive',
default=10,
type=int,
required=True)
parser.add_argument('-d', '--dataset',
help='Dataset from which to extract the list of files',
default='/ExpressPhysics/Run2016B-Express-v2/FEVT',
type=str,
required=True)
parser.add_argument('-r', '--run',
help='Run Number to consider',
type=int,
default=273158,
required=True)
args = parser.parse_args()
getFilesForQuery(args)
if __name__ == '__main__':
main()
|
|
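The argparse wrapper aside, the underlying DBS query can be issued directly; an equivalent sketch using the script's own default values:

# Equivalent direct query (sketch), bypassing argparse; values are the defaults above.
from dbs.apis.dbsClient import DbsApi
api = DbsApi(url="https://cmsweb.cern.ch/dbs/prod/global/DBSReader")
files = api.listFiles(dataset='/ExpressPhysics/Run2016B-Express-v2/FEVT',
                      run_num=273158, lumi_list=list(range(1, 11)))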
bb3bab2b8efb49d8a3ac9070289681d12fb8c7e2
|
modules/performanceplatform/files/sensu-elasticsearch.py
|
modules/performanceplatform/files/sensu-elasticsearch.py
|
#!/usr/bin/env python
# encoding: utf-8
import datetime
import json
import requests
JSON_REQUEST = {
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"match_all": {
}
},
{
"range": {
"@timestamp": {
"from": "now-1h",
"to": "now"
}
}
},
{
"fquery": {
"query": {
"field": {
"@fields.levelname": {
"query": "\"ERROR\""
}
}
},
"_cache": True
}
},
{
"bool": {
"must": [
{
"match_all": {
}
}
]
}
}
]
}
}
}
},
"highlight": {
"fields": {
},
"fragment_size": 2147483647,
"pre_tags": [
"@start-highlight@"
],
"post_tags": [
"@end-highlight@"
]
},
"size": 500,
"sort": [
{
"@timestamp": {
"order": "desc"
}
}
]
}
def main():
now = datetime.datetime.now().date()
es_host = 'elasticsearch:9200'
es_index = 'logstash-{year:04}.{month:02}.{day:02}'.format(
year=now.year, month=now.month, day=now.day)
response = requests.post(
'http://{}/{}/_search'.format(es_host, es_index),
headers={'Content-Type': 'application/json'},
data=json.dumps(JSON_REQUEST))
response.raise_for_status()
response_data = json.loads(response.content)
from pprint import pprint
hits = response_data['hits']['hits']
print("{} log matches".format(len(hits)))
for i, hit in enumerate(hits):
print("--- Log message #{} --- ".format(i + 1))
        pprint(hit['_source'])  # print this hit, not always the first one
return 2 if len(hits) > 0 else 0
if __name__ == '__main__':
main()
|
Add a version of sensu checking script which works on monitoring-1
|
Add a version of sensu checking script which works on monitoring-1
|
Python
|
mit
|
alphagov/pp-puppet,alphagov/pp-puppet,alphagov/pp-puppet,alphagov/pp-puppet
|
Add a version of sensu checking script which works on monitoring-1
|
#!/usr/bin/env python
# encoding: utf-8
import datetime
import json
import requests
JSON_REQUEST = {
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"match_all": {
}
},
{
"range": {
"@timestamp": {
"from": "now-1h",
"to": "now"
}
}
},
{
"fquery": {
"query": {
"field": {
"@fields.levelname": {
"query": "\"ERROR\""
}
}
},
"_cache": True
}
},
{
"bool": {
"must": [
{
"match_all": {
}
}
]
}
}
]
}
}
}
},
"highlight": {
"fields": {
},
"fragment_size": 2147483647,
"pre_tags": [
"@start-highlight@"
],
"post_tags": [
"@end-highlight@"
]
},
"size": 500,
"sort": [
{
"@timestamp": {
"order": "desc"
}
}
]
}
def main():
now = datetime.datetime.now().date()
es_host = 'elasticsearch:9200'
es_index = 'logstash-{year:04}.{month:02}.{day:02}'.format(
year=now.year, month=now.month, day=now.day)
response = requests.post(
'http://{}/{}/_search'.format(es_host, es_index),
headers={'Content-Type': 'application/json'},
data=json.dumps(JSON_REQUEST))
response.raise_for_status()
response_data = json.loads(response.content)
from pprint import pprint
hits = response_data['hits']['hits']
print("{} log matches".format(len(hits)))
for i, hit in enumerate(hits):
print("--- Log message #{} --- ".format(i + 1))
        pprint(hit['_source'])  # print this hit, not always the first one
return 2 if len(hits) > 0 else 0
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a version of sensu checking script which works on monitoring-1<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
import datetime
import json
import requests
JSON_REQUEST = {
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"match_all": {
}
},
{
"range": {
"@timestamp": {
"from": "now-1h",
"to": "now"
}
}
},
{
"fquery": {
"query": {
"field": {
"@fields.levelname": {
"query": "\"ERROR\""
}
}
},
"_cache": True
}
},
{
"bool": {
"must": [
{
"match_all": {
}
}
]
}
}
]
}
}
}
},
"highlight": {
"fields": {
},
"fragment_size": 2147483647,
"pre_tags": [
"@start-highlight@"
],
"post_tags": [
"@end-highlight@"
]
},
"size": 500,
"sort": [
{
"@timestamp": {
"order": "desc"
}
}
]
}
def main():
now = datetime.datetime.now().date()
es_host = 'elasticsearch:9200'
es_index = 'logstash-{year:04}.{month:02}.{day:02}'.format(
year=now.year, month=now.month, day=now.day)
response = requests.post(
'http://{}/{}/_search'.format(es_host, es_index),
headers={'Content-Type': 'application/json'},
data=json.dumps(JSON_REQUEST))
response.raise_for_status()
response_data = json.loads(response.content)
from pprint import pprint
hits = response_data['hits']['hits']
print("{} log matches".format(len(hits)))
for i, hit in enumerate(hits):
print("--- Log message #{} --- ".format(i + 1))
        pprint(hit['_source'])  # print this hit, not always the first one
return 2 if len(hits) > 0 else 0
if __name__ == '__main__':
main()
|
Add a version of sensu checking script which works on monitoring-1#!/usr/bin/env python
# encoding: utf-8
import datetime
import json
import requests
JSON_REQUEST = {
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"match_all": {
}
},
{
"range": {
"@timestamp": {
"from": "now-1h",
"to": "now"
}
}
},
{
"fquery": {
"query": {
"field": {
"@fields.levelname": {
"query": "\"ERROR\""
}
}
},
"_cache": True
}
},
{
"bool": {
"must": [
{
"match_all": {
}
}
]
}
}
]
}
}
}
},
"highlight": {
"fields": {
},
"fragment_size": 2147483647,
"pre_tags": [
"@start-highlight@"
],
"post_tags": [
"@end-highlight@"
]
},
"size": 500,
"sort": [
{
"@timestamp": {
"order": "desc"
}
}
]
}
def main():
now = datetime.datetime.now().date()
es_host = 'elasticsearch:9200'
es_index = 'logstash-{year:04}.{month:02}.{day:02}'.format(
year=now.year, month=now.month, day=now.day)
response = requests.post(
'http://{}/{}/_search'.format(es_host, es_index),
headers={'Content-Type': 'application/json'},
data=json.dumps(JSON_REQUEST))
response.raise_for_status()
response_data = json.loads(response.content)
from pprint import pprint
hits = response_data['hits']['hits']
print("{} log matches".format(len(hits)))
for i, hit in enumerate(hits):
print("--- Log message #{} --- ".format(i + 1))
        pprint(hit['_source'])  # print this hit, not always the first one
return 2 if len(hits) > 0 else 0
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a version of sensu checking script which works on monitoring-1<commit_after>#!/usr/bin/env python
# encoding: utf-8
import datetime
import json
import requests
JSON_REQUEST = {
"query": {
"filtered": {
"query": {
"bool": {
"should": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"filter": {
"bool": {
"must": [
{
"match_all": {
}
},
{
"range": {
"@timestamp": {
"from": "now-1h",
"to": "now"
}
}
},
{
"fquery": {
"query": {
"field": {
"@fields.levelname": {
"query": "\"ERROR\""
}
}
},
"_cache": True
}
},
{
"bool": {
"must": [
{
"match_all": {
}
}
]
}
}
]
}
}
}
},
"highlight": {
"fields": {
},
"fragment_size": 2147483647,
"pre_tags": [
"@start-highlight@"
],
"post_tags": [
"@end-highlight@"
]
},
"size": 500,
"sort": [
{
"@timestamp": {
"order": "desc"
}
}
]
}
def main():
now = datetime.datetime.now().date()
es_host = 'elasticsearch:9200'
es_index = 'logstash-{year:04}.{month:02}.{day:02}'.format(
year=now.year, month=now.month, day=now.day)
response = requests.post(
'http://{}/{}/_search'.format(es_host, es_index),
headers={'Content-Type': 'application/json'},
data=json.dumps(JSON_REQUEST))
response.raise_for_status()
response_data = json.loads(response.content)
from pprint import pprint
hits = response_data['hits']['hits']
print("{} log matches".format(len(hits)))
for i, hit in enumerate(hits):
print("--- Log message #{} --- ".format(i + 1))
        pprint(hit['_source'])  # print this hit, not always the first one
return 2 if len(hits) > 0 else 0
if __name__ == '__main__':
main()
|
|
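The daily Logstash index name built above with manual zero-padded formatting can be expressed more compactly with strftime; an equivalent sketch:

# Equivalent index-name construction using strftime.
import datetime
es_index = datetime.datetime.now().strftime('logstash-%Y.%m.%d')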
b5a7dba9339b199b96cbb0e0682b20437f738311
|
batchSTSconverter.py
|
batchSTSconverter.py
|
from PyQt4 import QtGui
import sys
import pyOmicron as pyO
import re
import numpy as np
app=QtGui.QApplication(sys.argv)
path=QtGui.QFileDialog.getExistingDirectory()
#path=r"C:\Users\scholi\Desktop\15-Oct-2015"
del app
del QtGui
M=pyO.Matrix(path)
STS={}
for i in M.images:
r=re.search(r"--([0-9]+)_([0-9]+).I\(V\)_mtrx",i)
if r:
ID=int(r.group(1))
num=int(r.group(2))
if ID in STS: STS[ID]+=1
else: STS[ID]=1
for ID in STS:
x,y=M.getSTS(ID)
R=np.column_stack([x,y])
for num in range(1,STS[ID]):
x,y=M.getSTS(ID,num)
R=np.column_stack((R,y))
np.savetxt(str(path+"/STS-%i.dat"%(ID)),R,header="Bias[V] "+" ".join([str(i+1) for i in range(STS[ID])]))
|
Add a batch STS to ascii exporter
|
Add a batch STS to ascii exporter
|
Python
|
apache-2.0
|
scholi/pyOmicron
|
Add a batch STS to ascii exporter
|
from PyQt4 import QtGui
import sys
import pyOmicron as pyO
import re
import numpy as np
app=QtGui.QApplication(sys.argv)
path=QtGui.QFileDialog.getExistingDirectory()
#path=r"C:\Users\scholi\Desktop\15-Oct-2015"
del app
del QtGui
M=pyO.Matrix(path)
STS={}
for i in M.images:
r=re.search(r"--([0-9]+)_([0-9]+).I\(V\)_mtrx",i)
if r:
ID=int(r.group(1))
num=int(r.group(2))
if ID in STS: STS[ID]+=1
else: STS[ID]=1
for ID in STS:
x,y=M.getSTS(ID)
R=np.column_stack([x,y])
for num in range(1,STS[ID]):
x,y=M.getSTS(ID,num)
R=np.column_stack((R,y))
np.savetxt(str(path+"/STS-%i.dat"%(ID)),R,header="Bias[V] "+" ".join([str(i+1) for i in range(STS[ID])]))
|
<commit_before><commit_msg>Add a batch STS to ascii exporter<commit_after>
|
from PyQt4 import QtGui
import sys
import pyOmicron as pyO
import re
import numpy as np
app=QtGui.QApplication(sys.argv)
path=QtGui.QFileDialog.getExistingDirectory()
#path=r"C:\Users\scholi\Desktop\15-Oct-2015"
del app
del QtGui
M=pyO.Matrix(path)
STS={}
for i in M.images:
r=re.search(r"--([0-9]+)_([0-9]+).I\(V\)_mtrx",i)
if r:
ID=int(r.group(1))
num=int(r.group(2))
if ID in STS: STS[ID]+=1
else: STS[ID]=1
for ID in STS:
x,y=M.getSTS(ID)
R=np.column_stack([x,y])
for num in range(1,STS[ID]):
x,y=M.getSTS(ID,num)
R=np.column_stack((R,y))
np.savetxt(str(path+"/STS-%i.dat"%(ID)),R,header="Bias[V] "+" ".join([str(i+1) for i in range(STS[ID])]))
|
Add a batch STS to ascii exporterfrom PyQt4 import QtGui
import sys
import pyOmicron as pyO
import re
import numpy as np
app=QtGui.QApplication(sys.argv)
path=QtGui.QFileDialog.getExistingDirectory()
#path=r"C:\Users\scholi\Desktop\15-Oct-2015"
del app
del QtGui
M=pyO.Matrix(path)
STS={}
for i in M.images:
r=re.search(r"--([0-9]+)_([0-9]+).I\(V\)_mtrx",i)
if r:
ID=int(r.group(1))
num=int(r.group(2))
if ID in STS: STS[ID]+=1
else: STS[ID]=1
for ID in STS:
x,y=M.getSTS(ID)
R=np.column_stack([x,y])
for num in range(1,STS[ID]):
x,y=M.getSTS(ID,num)
R=np.column_stack((R,y))
np.savetxt(str(path+"/STS-%i.dat"%(ID)),R,header="Bias[V] "+" ".join([str(i+1) for i in range(STS[ID])]))
|
<commit_before><commit_msg>Add a batch STS to ascii exporter<commit_after>from PyQt4 import QtGui
import sys
import pyOmicron as pyO
import re
import numpy as np
app=QtGui.QApplication(sys.argv)
path=QtGui.QFileDialog.getExistingDirectory()
#path=r"C:\Users\scholi\Desktop\15-Oct-2015"
del app
del QtGui
M=pyO.Matrix(path)
STS={}
for i in M.images:
r=re.search(r"--([0-9]+)_([0-9]+).I\(V\)_mtrx",i)
if r:
ID=int(r.group(1))
num=int(r.group(2))
if ID in STS: STS[ID]+=1
else: STS[ID]=1
for ID in STS:
x,y=M.getSTS(ID)
R=np.column_stack([x,y])
for num in range(1,STS[ID]):
x,y=M.getSTS(ID,num)
R=np.column_stack((R,y))
np.savetxt(str(path+"/STS-%i.dat"%(ID)),R,header="Bias[V] "+" ".join([str(i+1) for i in range(STS[ID])]))
|
|
160049859e40855082f7482421df383a5ed80df4
|
examples/fips.py
|
examples/fips.py
|
#!/usr/bin/env python3
'''
Suppose you're trying to estimate someone's median household income
based on their current location. Perhaps they posted a photograph on
Twitter that has latitude and longitude in its EXIF data. You might go
to the FCC census block conversions API (https://www.fcc.gov/general
/census-block-conversions-api) to figure out in which census block the
photo was taken.
'''
from destructure import match, MatchError, Binding, Switch
import json
from urllib.request import urlopen
from urllib.parse import urlencode
url = 'http://data.fcc.gov/api/block/find?'
params = {'format': 'json', 'showall': 'true',
# 'latitude': 28.35975, 'longitude': -81.421988}
'latitude': 28.359, 'longitude': -81.421}
results = Binding()
schema_one = \
{
"County": {
"name": results.county,
"FIPS": str,
},
"State": {
"name": results.state,
"code": str,
"FIPS": str,
},
"Block": {
"FIPS": results.fips,
},
"executionTime": str,
"status": "OK",
}
schema_intersection = \
{
"executionTime": str,
"County": {
"FIPS": str,
"name": results.county
},
"messages": [
"FCC0001: The coordinate lies on the boundary of mulitple blocks, first FIPS is displayed. For a complete list use showall=true to display 'intersection' element in the Block"
],
"Block": {
"FIPS": str,
"intersection": results.intersection
},
"status": "OK",
"State": {
"code": str,
"FIPS": str,
"name": results.state
}
}
with urlopen(url + urlencode(params)) as response:
data = response.read()
text = data.decode('utf-8')
mapping = json.loads(text)
s = Switch(data=mapping, binding=results)
if s.case(schema_one):
codes = [results.fips]
elif s.case(schema_intersection):
codes = [block['FIPS'] for block in results.intersection]
else:
raise MatchError('Could not match any schemas')
if not codes or None in codes:
fmt = 'No FIPS found for {latitude}, {longitude}'
raise ValueError(fmt.format(**params))
for fips in codes:
print(fips)
# From there, it's on to http://api.census.gov to finish the task.
|
Add simple switch/case usage example
|
Add simple switch/case usage example
|
Python
|
mit
|
selik/destructure
|
Add simple switch/case usage example
|
#!/usr/bin/env python3
'''
Suppose you're trying to estimate someone's median household income
based on their current location. Perhaps they posted a photograph on
Twitter that has latitude and longitude in its EXIF data. You might go
to the FCC census block conversions API (https://www.fcc.gov/general
/census-block-conversions-api) to figure out in which census block the
photo was taken.
'''
from destructure import match, MatchError, Binding, Switch
import json
from urllib.request import urlopen
from urllib.parse import urlencode
url = 'http://data.fcc.gov/api/block/find?'
params = {'format': 'json', 'showall': 'true',
# 'latitude': 28.35975, 'longitude': -81.421988}
'latitude': 28.359, 'longitude': -81.421}
results = Binding()
schema_one = \
{
"County": {
"name": results.county,
"FIPS": str,
},
"State": {
"name": results.state,
"code": str,
"FIPS": str,
},
"Block": {
"FIPS": results.fips,
},
"executionTime": str,
"status": "OK",
}
schema_intersection = \
{
"executionTime": str,
"County": {
"FIPS": str,
"name": results.county
},
"messages": [
"FCC0001: The coordinate lies on the boundary of mulitple blocks, first FIPS is displayed. For a complete list use showall=true to display 'intersection' element in the Block"
],
"Block": {
"FIPS": str,
"intersection": results.intersection
},
"status": "OK",
"State": {
"code": str,
"FIPS": str,
"name": results.state
}
}
with urlopen(url + urlencode(params)) as response:
data = response.read()
text = data.decode('utf-8')
mapping = json.loads(text)
s = Switch(data=mapping, binding=results)
if s.case(schema_one):
codes = [results.fips]
elif s.case(schema_intersection):
codes = [block['FIPS'] for block in results.intersection]
else:
raise MatchError('Could not match any schemas')
if not codes or None in codes:
fmt = 'No FIPS found for {latitude}, {longitude}'
raise ValueError(fmt.format(**params))
for fips in codes:
print(fips)
# From there, it's on to http://api.census.gov to finish the task.
|
<commit_before><commit_msg>Add simple switch/case usage example<commit_after>
|
#!/usr/bin/env python3
'''
Suppose you're trying to estimate someone's median household income
based on their current location. Perhaps they posted a photograph on
Twitter that has latitude and longitude in its EXIF data. You might go
to the FCC census block conversions API (https://www.fcc.gov/general
/census-block-conversions-api) to figure out in which census block the
photo was taken.
'''
from destructure import match, MatchError, Binding, Switch
import json
from urllib.request import urlopen
from urllib.parse import urlencode
url = 'http://data.fcc.gov/api/block/find?'
params = {'format': 'json', 'showall': 'true',
# 'latitude': 28.35975, 'longitude': -81.421988}
'latitude': 28.359, 'longitude': -81.421}
results = Binding()
schema_one = \
{
"County": {
"name": results.county,
"FIPS": str,
},
"State": {
"name": results.state,
"code": str,
"FIPS": str,
},
"Block": {
"FIPS": results.fips,
},
"executionTime": str,
"status": "OK",
}
schema_intersection = \
{
"executionTime": str,
"County": {
"FIPS": str,
"name": results.county
},
"messages": [
"FCC0001: The coordinate lies on the boundary of mulitple blocks, first FIPS is displayed. For a complete list use showall=true to display 'intersection' element in the Block"
],
"Block": {
"FIPS": str,
"intersection": results.intersection
},
"status": "OK",
"State": {
"code": str,
"FIPS": str,
"name": results.state
}
}
with urlopen(url + urlencode(params)) as response:
data = response.read()
text = data.decode('utf-8')
mapping = json.loads(text)
s = Switch(data=mapping, binding=results)
if s.case(schema_one):
codes = [results.fips]
elif s.case(schema_intersection):
codes = [block['FIPS'] for block in results.intersection]
else:
raise MatchError('Could not match any schemas')
if not codes or None in codes:
fmt = 'No FIPS found for {latitude}, {longitude}'
raise ValueError(fmt.format(**params))
for fips in codes:
print(fips)
# From there, it's on to http://api.census.gov to finish the task.
|
Add simple switch/case usage example#!/usr/bin/env python3
'''
Suppose you're trying to estimate someone's median household income
based on their current location. Perhaps they posted a photograph on
Twitter that has latitude and longitude in its EXIF data. You might go
to the FCC census block conversions API (https://www.fcc.gov/general
/census-block-conversions-api) to figure out in which census block the
photo was taken.
'''
from destructure import match, MatchError, Binding, Switch
import json
from urllib.request import urlopen
from urllib.parse import urlencode
url = 'http://data.fcc.gov/api/block/find?'
params = {'format': 'json', 'showall': 'true',
# 'latitude': 28.35975, 'longitude': -81.421988}
'latitude': 28.359, 'longitude': -81.421}
results = Binding()
schema_one = \
{
"County": {
"name": results.county,
"FIPS": str,
},
"State": {
"name": results.state,
"code": str,
"FIPS": str,
},
"Block": {
"FIPS": results.fips,
},
"executionTime": str,
"status": "OK",
}
schema_intersection = \
{
"executionTime": str,
"County": {
"FIPS": str,
"name": results.county
},
"messages": [
"FCC0001: The coordinate lies on the boundary of mulitple blocks, first FIPS is displayed. For a complete list use showall=true to display 'intersection' element in the Block"
],
"Block": {
"FIPS": str,
"intersection": results.intersection
},
"status": "OK",
"State": {
"code": str,
"FIPS": str,
"name": results.state
}
}
with urlopen(url + urlencode(params)) as response:
data = response.read()
text = data.decode('utf-8')
mapping = json.loads(text)
s = Switch(data=mapping, binding=results)
if s.case(schema_one):
codes = [results.fips]
elif s.case(schema_intersection):
codes = [block['FIPS'] for block in results.intersection]
else:
raise MatchError('Could not match any schemas')
if not codes or None in codes:
fmt = 'No FIPS found for {latitude}, {longitude}'
raise ValueError(fmt.format(**params))
for fips in codes:
print(fips)
# From there, it's on to http://api.census.gov to finish the task.
|
<commit_before><commit_msg>Add simple switch/case usage example<commit_after>#!/usr/bin/env python3
'''
Suppose you're trying to estimate someone's median household income
based on their current location. Perhaps they posted a photograph on
Twitter that has latitude and longitude in its EXIF data. You might go
to the FCC census block conversions API (https://www.fcc.gov/general
/census-block-conversions-api) to figure out in which census block the
photo was taken.
'''
from destructure import match, MatchError, Binding, Switch
import json
from urllib.request import urlopen
from urllib.parse import urlencode
url = 'http://data.fcc.gov/api/block/find?'
params = {'format': 'json', 'showall': 'true',
# 'latitude': 28.35975, 'longitude': -81.421988}
'latitude': 28.359, 'longitude': -81.421}
results = Binding()
schema_one = \
{
"County": {
"name": results.county,
"FIPS": str,
},
"State": {
"name": results.state,
"code": str,
"FIPS": str,
},
"Block": {
"FIPS": results.fips,
},
"executionTime": str,
"status": "OK",
}
schema_intersection = \
{
"executionTime": str,
"County": {
"FIPS": str,
"name": results.county
},
"messages": [
"FCC0001: The coordinate lies on the boundary of mulitple blocks, first FIPS is displayed. For a complete list use showall=true to display 'intersection' element in the Block"
],
"Block": {
"FIPS": str,
"intersection": results.intersection
},
"status": "OK",
"State": {
"code": str,
"FIPS": str,
"name": results.state
}
}
with urlopen(url + urlencode(params)) as response:
data = response.read()
text = data.decode('utf-8')
mapping = json.loads(text)
s = Switch(data=mapping, binding=results)
if s.case(schema_one):
codes = [results.fips]
elif s.case(schema_intersection):
codes = [block['FIPS'] for block in results.intersection]
else:
raise MatchError('Could not match any schemas')
if not codes or None in codes:
fmt = 'No FIPS found for {latitude}, {longitude}'
raise ValueError(fmt.format(**params))
for fips in codes:
print(fips)
# From there, it's on to http://api.census.gov to finish the task.
|
|
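The schema-matching idea in this record — validate a nested JSON response against a template of types and literals, with slots for captured values — can be sketched without the destructure library. A minimal, assumption-laden version of just the structural check (not destructure's actual semantics):
# Sketch: recursively match data against a schema of types, dicts, and literals.
def matches(data, schema):
    if isinstance(schema, type):        # e.g. str matches any string
        return isinstance(data, schema)
    if isinstance(schema, dict):        # every schema key must match recursively
        return (isinstance(data, dict)
                and all(k in data and matches(data[k], v) for k, v in schema.items()))
    return data == schema               # literal value, e.g. "OK"

print(matches({"status": "OK", "Block": {"FIPS": "12095"}},
              {"status": "OK", "Block": {"FIPS": str}}))  # True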
fbc5215199af220fd0dcf6f0911363e5e8fe9190
|
data_analysis/filter_news.py
|
data_analysis/filter_news.py
|
"""Generate year files with news counts
Usage:
filter_news.py <directory> <output>
Options:
-h, --help
"""
from docopt import docopt
from os import listdir
from os.path import isfile, join
import datetime
import pandas as pd
if __name__ == "__main__":
# Parse the command line
args = docopt(__doc__)
# Array with the week we are considering
    weeks = [42,43,44,45,46,47,48,49,50,51,52,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
# Final count dictionary
news_count = {}
# Get only the files in the directory
onlyfiles = [f for f in listdir(args["<directory>"]) if isfile(join(args["<directory>"], f))]
# Loop over all the files and parse them
for file in onlyfiles:
# Split the filename and get the day/month/year
file_name = file.split("_")
day = file_name[2]
month = file_name[1]
year = file_name[3]
# Compute the week number
week_number = datetime.date(int(year), int(month), int(day)).isocalendar()[1]
# Read and parse the file only if it is in the week range we are considering
if week_number in weeks:
# Read the file
file = pd.read_csv(join(args["<directory>"], file))
# Count how many news we have, considering only the italian ones
            total_news = len(file[file.lang_detected == "it"])
            # If that year column is still empty, create it and set it to zero
            if news_count.get(year, []) == []:
                news_count[year] = [0 for x in range(53)]
            # Increment this week's count (ISO weeks are 1-based)
            news_count[year][week_number - 1] += total_news
# Generate the index for the future dataframe
df_index = []
# Add a zero in front of number less than 10
for i in weeks:
if i < 10:
number = "0" + str(i)
else:
number = str(i)
df_index.append(number)
# Generate the dataframe
final_df = pd.DataFrame(news_count)
final_df.set_index(df_index)
# Print the dataframe to show the result
print(final_df)
# Save it to file
final_df.to_csv(args["<output>"], index_label="Week")
|
Add algorithm to count the news and to produce an output file.
|
[data_analysis] Add algorithm to count the news and to produce an output file.
|
Python
|
mit
|
geektoni/Influenza-Like-Illness-Predictor,geektoni/Influenza-Like-Illness-Predictor
|
[data_analysis] Add algorithm to count the news and to produce an output file.
|
"""Generate year files with news counts
Usage:
filter_news.py <directory> <output>
Options:
-h, --help
"""
from docopt import docopt
from os import listdir
from os.path import isfile, join
import datetime
import pandas as pd
if __name__ == "__main__":
# Parse the command line
args = docopt(__doc__)
# Array with the week we are considering
    weeks = [42,43,44,45,46,47,48,49,50,51,52,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
# Final count dictionary
news_count = {}
# Get only the files in the directory
onlyfiles = [f for f in listdir(args["<directory>"]) if isfile(join(args["<directory>"], f))]
# Loop over all the files and parse them
for file in onlyfiles:
# Split the filename and get the day/month/year
file_name = file.split("_")
day = file_name[2]
month = file_name[1]
year = file_name[3]
# Compute the week number
week_number = datetime.date(int(year), int(month), int(day)).isocalendar()[1]
# Read and parse the file only if it is in the week range we are considering
if week_number in weeks:
# Read the file
file = pd.read_csv(join(args["<directory>"], file))
# Count how many news we have, considering only the italian ones
            total_news = len(file[file.lang_detected == "it"])
            # If that year column is still empty, create it and set it to zero
            if news_count.get(year, []) == []:
                news_count[year] = [0 for x in range(53)]
            # Increment this week's count (ISO weeks are 1-based)
            news_count[year][week_number - 1] += total_news
# Generate the index for the future dataframe
df_index = []
# Add a zero in front of number less than 10
for i in weeks:
if i < 10:
number = "0" + str(i)
else:
number = str(i)
df_index.append(number)
# Generate the dataframe
final_df = pd.DataFrame(news_count)
final_df.set_index(df_index)
# Print the dataframe to show the result
print(final_df)
# Save it to file
final_df.to_csv(args["<output>"], index_label="Week")
|
<commit_before><commit_msg>[data_analysis] Add algorithm to count the news and to produce an output file.<commit_after>
|
"""Generate year files with news counts
Usage:
filter_news.py <directory> <output>
Options:
-h, --help
"""
from docopt import docopt
from os import listdir
from os.path import isfile, join
import datetime
import pandas as pd
if __name__ == "__main__":
# Parse the command line
args = docopt(__doc__)
# Array with the week we are considering
    weeks = [42,43,44,45,46,47,48,49,50,51,52,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
# Final count dictionary
news_count = {}
# Get only the files in the directory
onlyfiles = [f for f in listdir(args["<directory>"]) if isfile(join(args["<directory>"], f))]
# Loop over all the files and parse them
for file in onlyfiles:
# Split the filename and get the day/month/year
file_name = file.split("_")
day = file_name[2]
month = file_name[1]
year = file_name[3]
# Compute the week number
week_number = datetime.date(int(year), int(month), int(day)).isocalendar()[1]
# Read and parse the file only if it is in the week range we are considering
if week_number in weeks:
# Read the file
file = pd.read_csv(join(args["<directory>"], file))
# Count how many news we have, considering only the italian ones
            total_news = len(file[file.lang_detected == "it"])
            # If that year column is still empty, create it and set it to zero
            if news_count.get(year, []) == []:
                news_count[year] = [0 for x in range(53)]
            # Increment this week's count (ISO weeks are 1-based)
            news_count[year][week_number - 1] += total_news
# Generate the index for the future dataframe
df_index = []
# Add a zero in front of number less than 10
for i in weeks:
if i < 10:
number = "0" + str(i)
else:
number = str(i)
df_index.append(number)
# Generate the dataframe
final_df = pd.DataFrame(news_count)
final_df.set_index(df_index)
# Print the dataframe to show the result
print(final_df)
# Save it to file
final_df.to_csv(args["<output>"], index_label="Week")
|
[data_analysis] Add algorithm to count the news and to produce an output file."""Generate year files with news counts
Usage:
filter_news.py <directory> <output>
Options:
-h, --help
"""
from docopt import docopt
from os import listdir
from os.path import isfile, join
import datetime
import pandas as pd
if __name__ == "__main__":
# Parse the command line
args = docopt(__doc__)
# Array with the week we are considering
    weeks = [42,43,44,45,46,47,48,49,50,51,52,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
# Final count dictionary
news_count = {}
# Get only the files in the directory
onlyfiles = [f for f in listdir(args["<directory>"]) if isfile(join(args["<directory>"], f))]
# Loop over all the files and parse them
for file in onlyfiles:
# Split the filename and get the day/month/year
file_name = file.split("_")
day = file_name[2]
month = file_name[1]
year = file_name[3]
# Compute the week number
week_number = datetime.date(int(year), int(month), int(day)).isocalendar()[1]
# Read and parse the file only if it is in the week range we are considering
if week_number in weeks:
# Read the file
file = pd.read_csv(join(args["<directory>"], file))
# Count how many news we have, considering only the italian ones
            total_news = len(file[file.lang_detected == "it"])
            # If that year column is still empty, create it and set it to zero
            if news_count.get(year, []) == []:
                news_count[year] = [0 for x in range(53)]
            # Increment this week's count (ISO weeks are 1-based)
            news_count[year][week_number - 1] += total_news
# Generate the index for the future dataframe
df_index = []
# Add a zero in front of number less than 10
for i in weeks:
if i < 10:
number = "0" + str(i)
else:
number = str(i)
df_index.append(number)
# Generate the dataframe
final_df = pd.DataFrame(news_count)
final_df.set_index(df_index)
# Print the dataframe to show the result
print(final_df)
# Save it to file
final_df.to_csv(args["<output>"], index_label="Week")
|
<commit_before><commit_msg>[data_analysis] Add algorithm to count the news and to produce an output file.<commit_after>"""Generate year files with news counts
Usage:
filter_news.py <directory> <output>
Options:
-h, --help
"""
from docopt import docopt
from os import listdir
from os.path import isfile, join
import datetime
import pandas as pd
if __name__ == "__main__":
# Parse the command line
args = docopt(__doc__)
# Array with the week we are considering
    weeks = [42,43,44,45,46,47,48,49,50,51,52,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
# Final count dictionary
news_count = {}
# Get only the files in the directory
onlyfiles = [f for f in listdir(args["<directory>"]) if isfile(join(args["<directory>"], f))]
# Loop over all the files and parse them
for file in onlyfiles:
# Split the filename and get the day/month/year
file_name = file.split("_")
day = file_name[2]
month = file_name[1]
year = file_name[3]
# Compute the week number
week_number = datetime.date(int(year), int(month), int(day)).isocalendar()[1]
# Read and parse the file only if it is in the week range we are considering
if week_number in weeks:
# Read the file
file = pd.read_csv(join(args["<directory>"], file))
# Count how many news we have, considering only the italian ones
            total_news = len(file[file.lang_detected == "it"])
            # If that year column is still empty, create it and set it to zero
            if news_count.get(year, []) == []:
                news_count[year] = [0 for x in range(53)]
            # Increment this week's count (ISO weeks are 1-based)
            news_count[year][week_number - 1] += total_news
# Generate the index for the future dataframe
df_index = []
# Add a zero in front of number less than 10
for i in weeks:
if i < 10:
number = "0" + str(i)
else:
number = str(i)
df_index.append(number)
# Generate the dataframe
final_df = pd.DataFrame(news_count)
final_df.set_index(df_index)
# Print the dataframe to show the result
print(final_df)
# Save it to file
final_df.to_csv(args["<output>"], index_label="Week")
|
|
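The week bucketing in that script hinges on datetime.date(...).isocalendar(), which returns (ISO year, ISO week, ISO weekday) — and ISO weeks straddle calendar years, which is why counts are keyed by year with a 53-slot list each. A small self-contained check of the mapping:
# Sketch: ISO week numbers at year boundaries (values verified with the stdlib).
import datetime

for y, m, d in [(2015, 1, 1), (2015, 12, 31), (2016, 1, 4)]:
    iso_year, iso_week, _ = datetime.date(y, m, d).isocalendar()
    print("{}-{:02}-{:02} -> ISO year {}, week {}".format(y, m, d, iso_year, iso_week))
# 2015-01-01 -> week 1 of 2015; 2015-12-31 -> week 53 of 2015; 2016-01-04 -> week 1 of 2016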
514e2b3ce6464bd9a4f926fb9c42789ab82bbbd2
|
json_to_db.py
|
json_to_db.py
|
import json
import sys
import sqlite3
import os
no_ending = os.path.splitext(sys.argv[1])[0]
file_fields = no_ending.split("_")
currency = file_fields[-2]
asset = file_fields[-1]
table_name = "candles_{}_{}".format(currency.upper(), asset.upper())
conn = sqlite3.connect(no_ending +".db")
data = json.load(open(sys.argv[1], "r"))
field_headers = [
"start",
"open",
"high",
"low",
"close",
"vwp",
"volume",
"trader",
]
sql_insert = 'insert into candles_{}_{} values (null, ?, ?, ?, ?, ?, ?, ?, ?);'.format(currency, asset)
sql_drop_table = "DROP TABLE IF EXISTS candles_{}_{} ".format(currency, asset)
sql_create_table = '''CREATE TABLE IF NOT EXISTS candles_{}_{} (
id INTEGER PRIMARY KEY AUTOINCREMENT,
start INTEGER UNIQUE,
open REAL NOT NULL,
high REAL NOT NULL,
low REAL NOT NULL,
close REAL NOT NULL,
vwp REAL NOT NULL,
volume REAL NOT NULL,
trades INTEGER NOT NULL
);'''.format(currency, asset)
count = 0
if conn is not None:
c = conn.cursor()
c.execute(sql_drop_table)
c.execute(sql_create_table)
for row in data:
if data:
try:
c.execute(sql_insert, tuple(row))
except sqlite3.IntegrityError:
print("Multiple unique values encountered, ignoring entry")
count = count+1
conn.commit()
conn.close()
else:
print("Cant connect to database")
print(" {} unique rows collided.".format(count))
|
Add script for converting json data blob to .db file
|
Add script for converting json data blob to .db file
|
Python
|
mit
|
F1LT3R/bitcoin-scraper,F1LT3R/bitcoin-scraper
|
Add script for converting json data blob to .db file
|
import json
import sys
import sqlite3
import os
no_ending = os.path.splitext(sys.argv[1])[0]
file_fields = no_ending.split("_")
currency = file_fields[-2]
asset = file_fields[-1]
table_name = "candles_{}_{}".format(currency.upper(), asset.upper())
conn = sqlite3.connect(no_ending +".db")
data = json.load(open(sys.argv[1], "r"))
field_headers = [
"start",
"open",
"high",
"low",
"close",
"vwp",
"volume",
"trader",
]
sql_insert = 'insert into candles_{}_{} values (null, ?, ?, ?, ?, ?, ?, ?, ?);'.format(currency, asset)
sql_drop_table = "DROP TABLE IF EXISTS candles_{}_{} ".format(currency, asset)
sql_create_table = '''CREATE TABLE IF NOT EXISTS candles_{}_{} (
id INTEGER PRIMARY KEY AUTOINCREMENT,
start INTEGER UNIQUE,
open REAL NOT NULL,
high REAL NOT NULL,
low REAL NOT NULL,
close REAL NOT NULL,
vwp REAL NOT NULL,
volume REAL NOT NULL,
trades INTEGER NOT NULL
);'''.format(currency, asset)
count = 0
if conn is not None:
c = conn.cursor()
c.execute(sql_drop_table)
c.execute(sql_create_table)
for row in data:
if data:
try:
c.execute(sql_insert, tuple(row))
except sqlite3.IntegrityError:
print("Multiple unique values encountered, ignoring entry")
count = count+1
conn.commit()
conn.close()
else:
print("Cant connect to database")
print(" {} unique rows collided.".format(count))
|
<commit_before><commit_msg>Add script for converting json data blob to .db file<commit_after>
|
import json
import sys
import sqlite3
import os
no_ending = os.path.splitext(sys.argv[1])[0]
file_fields = no_ending.split("_")
currency = file_fields[-2]
asset = file_fields[-1]
table_name = "candles_{}_{}".format(currency.upper(), asset.upper())
conn = sqlite3.connect(no_ending +".db")
data = json.load(open(sys.argv[1], "r"))
field_headers = [
"start",
"open",
"high",
"low",
"close",
"vwp",
"volume",
"trader",
]
sql_insert = 'insert into candles_{}_{} values (null, ?, ?, ?, ?, ?, ?, ?, ?);'.format(currency, asset)
sql_drop_table = "DROP TABLE IF EXISTS candles_{}_{} ".format(currency, asset)
sql_create_table = '''CREATE TABLE IF NOT EXISTS candles_{}_{} (
id INTEGER PRIMARY KEY AUTOINCREMENT,
start INTEGER UNIQUE,
open REAL NOT NULL,
high REAL NOT NULL,
low REAL NOT NULL,
close REAL NOT NULL,
vwp REAL NOT NULL,
volume REAL NOT NULL,
trades INTEGER NOT NULL
);'''.format(currency, asset)
count = 0
if conn is not None:
c = conn.cursor()
c.execute(sql_drop_table)
c.execute(sql_create_table)
for row in data:
if data:
try:
c.execute(sql_insert, tuple(row))
except sqlite3.IntegrityError:
print("Multiple unique values encountered, ignoring entry")
count = count+1
conn.commit()
conn.close()
else:
print("Cant connect to database")
print(" {} unique rows collided.".format(count))
|
Add script for converting json data blob to .db fileimport json
import sys
import sqlite3
import os
no_ending = os.path.splitext(sys.argv[1])[0]
file_fields = no_ending.split("_")
currency = file_fields[-2]
asset = file_fields[-1]
table_name = "candles_{}_{}".format(currency.upper(), asset.upper())
conn = sqlite3.connect(no_ending +".db")
data = json.load(open(sys.argv[1], "r"))
field_headers = [
"start",
"open",
"high",
"low",
"close",
"vwp",
"volume",
"trader",
]
sql_insert = 'insert into candles_{}_{} values (null, ?, ?, ?, ?, ?, ?, ?, ?);'.format(currency, asset)
sql_drop_table = "DROP TABLE IF EXISTS candles_{}_{} ".format(currency, asset)
sql_create_table = '''CREATE TABLE IF NOT EXISTS candles_{}_{} (
id INTEGER PRIMARY KEY AUTOINCREMENT,
start INTEGER UNIQUE,
open REAL NOT NULL,
high REAL NOT NULL,
low REAL NOT NULL,
close REAL NOT NULL,
vwp REAL NOT NULL,
volume REAL NOT NULL,
trades INTEGER NOT NULL
);'''.format(currency, asset)
count = 0
if conn is not None:
c = conn.cursor()
c.execute(sql_drop_table)
c.execute(sql_create_table)
for row in data:
if data:
try:
c.execute(sql_insert, tuple(row))
except sqlite3.IntegrityError:
print("Multiple unique values encountered, ignoring entry")
count = count+1
conn.commit()
conn.close()
else:
print("Cant connect to database")
print(" {} unique rows collided.".format(count))
|
<commit_before><commit_msg>Add script for converting json data blob to .db file<commit_after>import json
import sys
import sqlite3
import os
no_ending = os.path.splitext(sys.argv[1])[0]
file_fields = no_ending.split("_")
currency = file_fields[-2]
asset = file_fields[-1]
table_name = "candles_{}_{}".format(currency.upper(), asset.upper())
conn = sqlite3.connect(no_ending +".db")
data = json.load(open(sys.argv[1], "r"))
field_headers = [
"start",
"open",
"high",
"low",
"close",
"vwp",
"volume",
"trader",
]
sql_insert = 'insert into candles_{}_{} values (null, ?, ?, ?, ?, ?, ?, ?, ?);'.format(currency, asset)
sql_drop_table = "DROP TABLE IF EXISTS candles_{}_{} ".format(currency, asset)
sql_create_table = '''CREATE TABLE IF NOT EXISTS candles_{}_{} (
id INTEGER PRIMARY KEY AUTOINCREMENT,
start INTEGER UNIQUE,
open REAL NOT NULL,
high REAL NOT NULL,
low REAL NOT NULL,
close REAL NOT NULL,
vwp REAL NOT NULL,
volume REAL NOT NULL,
trades INTEGER NOT NULL
);'''.format(currency, asset)
count = 0
if conn is not None:
c = conn.cursor()
c.execute(sql_drop_table)
c.execute(sql_create_table)
for row in data:
if data:
try:
c.execute(sql_insert, tuple(row))
except sqlite3.IntegrityError:
print("Multiple unique values encountered, ignoring entry")
count = count+1
conn.commit()
conn.close()
else:
print("Cant connect to database")
print(" {} unique rows collided.".format(count))
|
|
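Instead of catching sqlite3.IntegrityError row by row, SQLite's conflict clauses can absorb duplicate unique keys in bulk. A sketch of the same load with INSERT OR IGNORE and executemany — the table and columns here are a cut-down illustration, not the record's schema:
# Sketch: bulk insert that silently skips rows colliding on the UNIQUE column.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE candles (id INTEGER PRIMARY KEY AUTOINCREMENT,"
             " start INTEGER UNIQUE, close REAL NOT NULL)")
rows = [(1, 10.0), (2, 11.5), (1, 99.9)]  # third row collides on start=1
conn.executemany("INSERT OR IGNORE INTO candles VALUES (NULL, ?, ?)", rows)
conn.commit()
print(conn.execute("SELECT COUNT(*) FROM candles").fetchone()[0])  # prints 2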
7c6c9a688b33961b2eb904ee94e87ec90154260a
|
tests/strings/string_format_s_simple.py
|
tests/strings/string_format_s_simple.py
|
a = "well"
b = "seems to work"
c = "something else"
# form 0
s = "b=%s" % a
print s
# form 1
s = "b,c,d=%s+%s+%s" % (a, b, c)
print s
|
Add a simple test for %s
|
Add a simple test for %s
|
Python
|
mit
|
buchuki/pyjaco,chrivers/pyjaco,qsnake/py2js,chrivers/pyjaco,chrivers/pyjaco,buchuki/pyjaco,mattpap/py2js,buchuki/pyjaco,qsnake/py2js,mattpap/py2js
|
Add a simple test for %s
|
a = "well"
b = "seems to work"
c = "something else"
# form 0
s = "b=%s" % a
print s
# form 1
s = "b,c,d=%s+%s+%s" % (a, b, c)
print s
|
<commit_before><commit_msg>Add a simple test for %s<commit_after>
|
a = "well"
b = "seems to work"
c = "something else"
# form 0
s = "b=%s" % a
print s
# form 1
s = "b,c,d=%s+%s+%s" % (a, b, c)
print s
|
Add a simple test for %sa = "well"
b = "seems to work"
c = "something else"
# form 0
s = "b=%s" % a
print s
# form 1
s = "b,c,d=%s+%s+%s" % (a, b, c)
print s
|
<commit_before><commit_msg>Add a simple test for %s<commit_after>a = "well"
b = "seems to work"
c = "something else"
# form 0
s = "b=%s" % a
print s
# form 1
s = "b,c,d=%s+%s+%s" % (a, b, c)
print s
|
|
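The two "forms" in this test matter because the % operator treats a bare tuple as an argument pack. A Python 3 rendering of the same cases, plus the edge case the test skirts:
# Sketch: %-formatting with a single value vs. a tuple of values.
a, b, c = "well", "seems to work", "something else"
print("b=%s" % a)                    # a single non-tuple value needs no wrapping
print("b,c,d=%s+%s+%s" % (a, b, c))  # a tuple supplies one value per %s
# Caveat: if the single value is itself a tuple, wrap it — "%s" % (value,)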
13eeba54bdbac256d6b4cd0f56fb92439303d9cc
|
misc/solarized-dark-high-contrast.py
|
misc/solarized-dark-high-contrast.py
|
#!/usr/bin/env python
import sys
if sys.version_info < (3, 4):
sys.exit('ERROR: Requires Python 3.4')
from enum import Enum
def main():
Cases = Enum('Cases', 'lower upper')
infile_case = None
if len(sys.argv) < 2:
sys.stderr.write('ERROR: Must provide a file to modify\n')
sys.exit('Usage: {} FILE'.format(sys.argv[0]))
# Keep these in lists instead of a dict to preserve ordering
color_codes_dark = [
'eee8d5',
'93a1a1',
'839496',
'657b83',
'586e75',
]
color_codes_dark_high_contrast = [
'fdf6e3',
'eee8d5',
'93a1a1',
'839496',
'657b83',
]
with open(sys.argv[1], 'r') as infile:
outfile_data = infile.read()
# Figure out whether the input is using upper or lower case color codes
for color_code in color_codes_dark:
# Skip color codes that don't contain letters
if color_code.lower() == color_code.upper():
continue
if outfile_data.find(color_code.lower()) != -1:
infile_case = Cases.lower
# Use the first one we find as the decisive case
break
elif outfile_data.find(color_code.upper()) != -1:
infile_case = Cases.upper
break
for i in range(len(color_codes_dark)):
if infile_case == Cases.lower:
outfile_data = outfile_data.replace(color_codes_dark[i].lower(), color_codes_dark_high_contrast[i].lower())
outfile_data = outfile_data.replace(color_codes_dark[i].upper(), color_codes_dark_high_contrast[i].lower())
elif infile_case == Cases.upper:
outfile_data = outfile_data.replace(color_codes_dark[i].lower(), color_codes_dark_high_contrast[i].upper())
outfile_data = outfile_data.replace(color_codes_dark[i].upper(), color_codes_dark_high_contrast[i].upper())
with open('{}-high-contrast.{}'.format(*sys.argv[1].rsplit('.', 1)), 'w') as outfile:
outfile.write(outfile_data)
if __name__ == '__main__':
main()
|
Add script for converting a solarized dark file to solarized dark high contrast
|
Add script for converting a solarized dark file to solarized dark high contrast
|
Python
|
mit
|
bmaupin/solarized-dark-high-contrast,bmaupin/solarized-dark-high-contrast
|
Add script for converting a solarized dark file to solarized dark high contrast
|
#!/usr/bin/env python
import sys
if sys.version_info < (3, 4):
sys.exit('ERROR: Requires Python 3.4')
from enum import Enum
def main():
Cases = Enum('Cases', 'lower upper')
infile_case = None
if len(sys.argv) < 2:
sys.stderr.write('ERROR: Must provide a file to modify\n')
sys.exit('Usage: {} FILE'.format(sys.argv[0]))
# Keep these in lists instead of a dict to preserve ordering
color_codes_dark = [
'eee8d5',
'93a1a1',
'839496',
'657b83',
'586e75',
]
color_codes_dark_high_contrast = [
'fdf6e3',
'eee8d5',
'93a1a1',
'839496',
'657b83',
]
with open(sys.argv[1], 'r') as infile:
outfile_data = infile.read()
# Figure out whether the input is using upper or lower case color codes
for color_code in color_codes_dark:
# Skip color codes that don't contain letters
if color_code.lower() == color_code.upper():
continue
if outfile_data.find(color_code.lower()) != -1:
infile_case = Cases.lower
# Use the first one we find as the decisive case
break
elif outfile_data.find(color_code.upper()) != -1:
infile_case = Cases.upper
break
for i in range(len(color_codes_dark)):
if infile_case == Cases.lower:
outfile_data = outfile_data.replace(color_codes_dark[i].lower(), color_codes_dark_high_contrast[i].lower())
outfile_data = outfile_data.replace(color_codes_dark[i].upper(), color_codes_dark_high_contrast[i].lower())
elif infile_case == Cases.upper:
outfile_data = outfile_data.replace(color_codes_dark[i].lower(), color_codes_dark_high_contrast[i].upper())
outfile_data = outfile_data.replace(color_codes_dark[i].upper(), color_codes_dark_high_contrast[i].upper())
with open('{}-high-contrast.{}'.format(*sys.argv[1].rsplit('.', 1)), 'w') as outfile:
outfile.write(outfile_data)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for converting a solarized dark file to solarized dark high contrast<commit_after>
|
#!/usr/bin/env python
import sys
if sys.version_info < (3, 4):
sys.exit('ERROR: Requires Python 3.4')
from enum import Enum
def main():
Cases = Enum('Cases', 'lower upper')
infile_case = None
if len(sys.argv) < 2:
sys.stderr.write('ERROR: Must provide a file to modify\n')
sys.exit('Usage: {} FILE'.format(sys.argv[0]))
# Keep these in lists instead of a dict to preserve ordering
color_codes_dark = [
'eee8d5',
'93a1a1',
'839496',
'657b83',
'586e75',
]
color_codes_dark_high_contrast = [
'fdf6e3',
'eee8d5',
'93a1a1',
'839496',
'657b83',
]
with open(sys.argv[1], 'r') as infile:
outfile_data = infile.read()
# Figure out whether the input is using upper or lower case color codes
for color_code in color_codes_dark:
# Skip color codes that don't contain letters
if color_code.lower() == color_code.upper():
continue
if outfile_data.find(color_code.lower()) != -1:
infile_case = Cases.lower
# Use the first one we find as the decisive case
break
elif outfile_data.find(color_code.upper()) != -1:
infile_case = Cases.upper
break
for i in range(len(color_codes_dark)):
if infile_case == Cases.lower:
outfile_data = outfile_data.replace(color_codes_dark[i].lower(), color_codes_dark_high_contrast[i].lower())
outfile_data = outfile_data.replace(color_codes_dark[i].upper(), color_codes_dark_high_contrast[i].lower())
elif infile_case == Cases.upper:
outfile_data = outfile_data.replace(color_codes_dark[i].lower(), color_codes_dark_high_contrast[i].upper())
outfile_data = outfile_data.replace(color_codes_dark[i].upper(), color_codes_dark_high_contrast[i].upper())
with open('{}-high-contrast.{}'.format(*sys.argv[1].rsplit('.', 1)), 'w') as outfile:
outfile.write(outfile_data)
if __name__ == '__main__':
main()
|
Add script for converting a solarized dark file to solarized dark high contrast#!/usr/bin/env python
import sys
if sys.version_info < (3, 4):
sys.exit('ERROR: Requires Python 3.4')
from enum import Enum
def main():
Cases = Enum('Cases', 'lower upper')
infile_case = None
if len(sys.argv) < 2:
sys.stderr.write('ERROR: Must provide a file to modify\n')
sys.exit('Usage: {} FILE'.format(sys.argv[0]))
# Keep these in lists instead of a dict to preserve ordering
color_codes_dark = [
'eee8d5',
'93a1a1',
'839496',
'657b83',
'586e75',
]
color_codes_dark_high_contrast = [
'fdf6e3',
'eee8d5',
'93a1a1',
'839496',
'657b83',
]
with open(sys.argv[1], 'r') as infile:
outfile_data = infile.read()
# Figure out whether the input is using upper or lower case color codes
for color_code in color_codes_dark:
# Skip color codes that don't contain letters
if color_code.lower() == color_code.upper():
continue
if outfile_data.find(color_code.lower()) != -1:
infile_case = Cases.lower
# Use the first one we find as the decisive case
break
elif outfile_data.find(color_code.upper()) != -1:
infile_case = Cases.upper
break
for i in range(len(color_codes_dark)):
if infile_case == Cases.lower:
outfile_data = outfile_data.replace(color_codes_dark[i].lower(), color_codes_dark_high_contrast[i].lower())
outfile_data = outfile_data.replace(color_codes_dark[i].upper(), color_codes_dark_high_contrast[i].lower())
elif infile_case == Cases.upper:
outfile_data = outfile_data.replace(color_codes_dark[i].lower(), color_codes_dark_high_contrast[i].upper())
outfile_data = outfile_data.replace(color_codes_dark[i].upper(), color_codes_dark_high_contrast[i].upper())
with open('{}-high-contrast.{}'.format(*sys.argv[1].rsplit('.', 1)), 'w') as outfile:
outfile.write(outfile_data)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for converting a solarized dark file to solarized dark high contrast<commit_after>#!/usr/bin/env python
import sys
if sys.version_info < (3, 4):
sys.exit('ERROR: Requires Python 3.4')
from enum import Enum
def main():
Cases = Enum('Cases', 'lower upper')
infile_case = None
if len(sys.argv) < 2:
sys.stderr.write('ERROR: Must provide a file to modify\n')
sys.exit('Usage: {} FILE'.format(sys.argv[0]))
# Keep these in lists instead of a dict to preserve ordering
color_codes_dark = [
'eee8d5',
'93a1a1',
'839496',
'657b83',
'586e75',
]
color_codes_dark_high_contrast = [
'fdf6e3',
'eee8d5',
'93a1a1',
'839496',
'657b83',
]
with open(sys.argv[1], 'r') as infile:
outfile_data = infile.read()
# Figure out whether the input is using upper or lower case color codes
for color_code in color_codes_dark:
# Skip color codes that don't contain letters
if color_code.lower() == color_code.upper():
continue
if outfile_data.find(color_code.lower()) != -1:
infile_case = Cases.lower
# Use the first one we find as the decisive case
break
elif outfile_data.find(color_code.upper()) != -1:
infile_case = Cases.upper
break
for i in range(len(color_codes_dark)):
if infile_case == Cases.lower:
outfile_data = outfile_data.replace(color_codes_dark[i].lower(), color_codes_dark_high_contrast[i].lower())
outfile_data = outfile_data.replace(color_codes_dark[i].upper(), color_codes_dark_high_contrast[i].lower())
elif infile_case == Cases.upper:
outfile_data = outfile_data.replace(color_codes_dark[i].lower(), color_codes_dark_high_contrast[i].upper())
outfile_data = outfile_data.replace(color_codes_dark[i].upper(), color_codes_dark_high_contrast[i].upper())
with open('{}-high-contrast.{}'.format(*sys.argv[1].rsplit('.', 1)), 'w') as outfile:
outfile.write(outfile_data)
if __name__ == '__main__':
main()
|
|
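The case-detection loop above can be collapsed with a case-insensitive regex whose replacement function mirrors each match's case; a single pass also sidesteps any concern about one substitution's output being rewritten by a later rule. A sketch with two of the color pairs (the input string is hypothetical):
# Sketch: case-preserving hex color substitution in one regex pass.
import re

pairs = {"eee8d5": "fdf6e3", "93a1a1": "eee8d5"}

def swap(match):
    hit = match.group(0)
    new = pairs[hit.lower()]
    return new.upper() if hit.isupper() else new

pattern = re.compile("|".join(pairs), re.IGNORECASE)
print(pattern.sub(swap, "fg: #EEE8D5; dim: #93a1a1"))  # fg: #FDF6E3; dim: #eee8d5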
25d8fab731e0722d4af0bd711dd5acfb2fe41611
|
test/test_seq_conservation.py
|
test/test_seq_conservation.py
|
import unittest
import utils
import os
import sys
import shutil
import contextlib
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(TOPDIR, 'lib'))
sys.path.append(TOPDIR)
import seqConservation
@contextlib.contextmanager
def mock_usearch():
"""Make mock usearch binary"""
subdir = 'mock_usearch'
os.mkdir(subdir)
fname = os.path.join(subdir, 'usearch')
with open(fname, 'w') as fh:
fh.write("""#!/usr/bin/env python
import sys
outf = sys.argv[4]
with open(outf, 'w') as fh:
fh.write('\\t'.join(['S', '0', '292', '*', '*', '*', '*', '*',
'AH70_12410', '*\\n']))
fh.write('\\t'.join(['L', '0', '292', '*', '*', '*', '*', '*',
'AH70_12410', '*\\n']))
fh.write('\\t'.join(['H', '0', '292', '99.7', '+', '0', '0', '292M',
'EN70_12566', 'AH70_12410\\n']))
fh.write('\\t'.join(['S', '1', '292', '*', '*', '*', '*', '*',
'EX70_12567', '*\\n']))
fh.write('\\t'.join(['H', '1', '292', '98.2', '+', '0', '0', '292M',
'AH70_12410', 'EX70_12567\\n']))
""")
os.chmod(fname, 0775)
oldpath = os.environ['PATH']
os.environ['PATH'] = subdir + ':' + os.environ['PATH']
yield
os.environ['PATH'] = oldpath
class Tests(unittest.TestCase):
def test_ucluster(self):
"""Test ucluster() function"""
with utils.temporary_working_directory() as tmpdir:
with mock_usearch():
clusters = seqConservation.ucluster('dummy.ali')
self.assertEqual(len(clusters), 4)
if __name__ == '__main__':
unittest.main()
|
Add basic test for usearch clustering.
|
Add basic test for usearch clustering.
|
Python
|
lgpl-2.1
|
salilab/cryptosite,salilab/cryptosite,salilab/cryptosite
|
Add basic test for usearch clustering.
|
import unittest
import utils
import os
import sys
import shutil
import contextlib
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(TOPDIR, 'lib'))
sys.path.append(TOPDIR)
import seqConservation
@contextlib.contextmanager
def mock_usearch():
"""Make mock usearch binary"""
subdir = 'mock_usearch'
os.mkdir(subdir)
fname = os.path.join(subdir, 'usearch')
with open(fname, 'w') as fh:
fh.write("""#!/usr/bin/env python
import sys
outf = sys.argv[4]
with open(outf, 'w') as fh:
fh.write('\\t'.join(['S', '0', '292', '*', '*', '*', '*', '*',
'AH70_12410', '*\\n']))
fh.write('\\t'.join(['L', '0', '292', '*', '*', '*', '*', '*',
'AH70_12410', '*\\n']))
fh.write('\\t'.join(['H', '0', '292', '99.7', '+', '0', '0', '292M',
'EN70_12566', 'AH70_12410\\n']))
fh.write('\\t'.join(['S', '1', '292', '*', '*', '*', '*', '*',
'EX70_12567', '*\\n']))
fh.write('\\t'.join(['H', '1', '292', '98.2', '+', '0', '0', '292M',
'AH70_12410', 'EX70_12567\\n']))
""")
os.chmod(fname, 0775)
oldpath = os.environ['PATH']
os.environ['PATH'] = subdir + ':' + os.environ['PATH']
yield
os.environ['PATH'] = oldpath
class Tests(unittest.TestCase):
def test_ucluster(self):
"""Test ucluster() function"""
with utils.temporary_working_directory() as tmpdir:
with mock_usearch():
clusters = seqConservation.ucluster('dummy.ali')
self.assertEqual(len(clusters), 4)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add basic test for usearch clustering.<commit_after>
|
import unittest
import utils
import os
import sys
import shutil
import contextlib
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(TOPDIR, 'lib'))
sys.path.append(TOPDIR)
import seqConservation
@contextlib.contextmanager
def mock_usearch():
"""Make mock usearch binary"""
subdir = 'mock_usearch'
os.mkdir(subdir)
fname = os.path.join(subdir, 'usearch')
with open(fname, 'w') as fh:
fh.write("""#!/usr/bin/env python
import sys
outf = sys.argv[4]
with open(outf, 'w') as fh:
fh.write('\\t'.join(['S', '0', '292', '*', '*', '*', '*', '*',
'AH70_12410', '*\\n']))
fh.write('\\t'.join(['L', '0', '292', '*', '*', '*', '*', '*',
'AH70_12410', '*\\n']))
fh.write('\\t'.join(['H', '0', '292', '99.7', '+', '0', '0', '292M',
'EN70_12566', 'AH70_12410\\n']))
fh.write('\\t'.join(['S', '1', '292', '*', '*', '*', '*', '*',
'EX70_12567', '*\\n']))
fh.write('\\t'.join(['H', '1', '292', '98.2', '+', '0', '0', '292M',
'AH70_12410', 'EX70_12567\\n']))
""")
os.chmod(fname, 0775)
oldpath = os.environ['PATH']
os.environ['PATH'] = subdir + ':' + os.environ['PATH']
yield
os.environ['PATH'] = oldpath
class Tests(unittest.TestCase):
def test_ucluster(self):
"""Test ucluster() function"""
with utils.temporary_working_directory() as tmpdir:
with mock_usearch():
clusters = seqConservation.ucluster('dummy.ali')
self.assertEqual(len(clusters), 4)
if __name__ == '__main__':
unittest.main()
|
Add basic test for usearch clustering.import unittest
import utils
import os
import sys
import shutil
import contextlib
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(TOPDIR, 'lib'))
sys.path.append(TOPDIR)
import seqConservation
@contextlib.contextmanager
def mock_usearch():
"""Make mock usearch binary"""
subdir = 'mock_usearch'
os.mkdir(subdir)
fname = os.path.join(subdir, 'usearch')
with open(fname, 'w') as fh:
fh.write("""#!/usr/bin/env python
import sys
outf = sys.argv[4]
with open(outf, 'w') as fh:
fh.write('\\t'.join(['S', '0', '292', '*', '*', '*', '*', '*',
'AH70_12410', '*\\n']))
fh.write('\\t'.join(['L', '0', '292', '*', '*', '*', '*', '*',
'AH70_12410', '*\\n']))
fh.write('\\t'.join(['H', '0', '292', '99.7', '+', '0', '0', '292M',
'EN70_12566', 'AH70_12410\\n']))
fh.write('\\t'.join(['S', '1', '292', '*', '*', '*', '*', '*',
'EX70_12567', '*\\n']))
fh.write('\\t'.join(['H', '1', '292', '98.2', '+', '0', '0', '292M',
'AH70_12410', 'EX70_12567\\n']))
""")
os.chmod(fname, 0775)
oldpath = os.environ['PATH']
os.environ['PATH'] = subdir + ':' + os.environ['PATH']
yield
os.environ['PATH'] = oldpath
class Tests(unittest.TestCase):
def test_ucluster(self):
"""Test ucluster() function"""
with utils.temporary_working_directory() as tmpdir:
with mock_usearch():
clusters = seqConservation.ucluster('dummy.ali')
self.assertEqual(len(clusters), 4)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add basic test for usearch clustering.<commit_after>import unittest
import utils
import os
import sys
import shutil
import contextlib
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(TOPDIR, 'lib'))
sys.path.append(TOPDIR)
import seqConservation
@contextlib.contextmanager
def mock_usearch():
"""Make mock usearch binary"""
subdir = 'mock_usearch'
os.mkdir(subdir)
fname = os.path.join(subdir, 'usearch')
with open(fname, 'w') as fh:
fh.write("""#!/usr/bin/env python
import sys
outf = sys.argv[4]
with open(outf, 'w') as fh:
fh.write('\\t'.join(['S', '0', '292', '*', '*', '*', '*', '*',
'AH70_12410', '*\\n']))
fh.write('\\t'.join(['L', '0', '292', '*', '*', '*', '*', '*',
'AH70_12410', '*\\n']))
fh.write('\\t'.join(['H', '0', '292', '99.7', '+', '0', '0', '292M',
'EN70_12566', 'AH70_12410\\n']))
fh.write('\\t'.join(['S', '1', '292', '*', '*', '*', '*', '*',
'EX70_12567', '*\\n']))
fh.write('\\t'.join(['H', '1', '292', '98.2', '+', '0', '0', '292M',
'AH70_12410', 'EX70_12567\\n']))
""")
os.chmod(fname, 0775)
oldpath = os.environ['PATH']
os.environ['PATH'] = subdir + ':' + os.environ['PATH']
yield
os.environ['PATH'] = oldpath
class Tests(unittest.TestCase):
def test_ucluster(self):
"""Test ucluster() function"""
with utils.temporary_working_directory() as tmpdir:
with mock_usearch():
clusters = seqConservation.ucluster('dummy.ali')
self.assertEqual(len(clusters), 4)
if __name__ == '__main__':
unittest.main()
|
|
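The mock-binary trick in that test — drop an executable stub on PATH so the code under test shells out to it — generalizes beyond usearch. A Python 3, POSIX-only sketch (the command name and stub body are hypothetical):
# Sketch: temporarily shadow an external command with a scripted stub.
import contextlib, os, subprocess, tempfile

@contextlib.contextmanager
def mock_command(name, body):
    with tempfile.TemporaryDirectory() as d:
        path = os.path.join(d, name)
        with open(path, "w") as fh:
            fh.write("#!/bin/sh\n" + body)
        os.chmod(path, 0o775)
        old = os.environ["PATH"]
        os.environ["PATH"] = d + os.pathsep + old
        try:
            yield
        finally:
            os.environ["PATH"] = old

with mock_command("usearch", "echo mocked-output\n"):
    print(subprocess.check_output(["usearch"]).decode())  # mocked-output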
e57fe864a312b6a1f5493c5b4687568bad99949c
|
numba/cuda/tests/cudapy/test_lang.py
|
numba/cuda/tests/cudapy/test_lang.py
|
"""
Test basic language features
"""
from __future__ import print_function, absolute_import, division
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class TestLang(unittest.TestCase):
def test_enumerate(self):
tup = (1., 2.5, 3.)
@cuda.jit("void(float64[:])")
def foo(a):
for i, v in enumerate(tup):
a[i] = v
a = np.zeros(len(tup))
foo(a)
self.assertTrue(np.all(a == tup))
def test_zip(self):
t1 = (1, 2, 3)
t2 = (4.5, 5.6, 6.7)
@cuda.jit("void(float64[:])")
def foo(a):
c = 0
for i, j in zip(t1, t2):
c += i + j
a[0] = c
a = np.zeros(1)
foo(a)
b = np.array(t1)
c = np.array(t2)
self.assertTrue(np.all(a == (b + c).sum()))
if __name__ == '__main__':
unittest.main()
|
Test new language feature in cuda python
|
Test new language feature in cuda python
|
Python
|
bsd-2-clause
|
jriehl/numba,numba/numba,pombredanne/numba,cpcloud/numba,gmarkall/numba,GaZ3ll3/numba,gdementen/numba,seibert/numba,stonebig/numba,GaZ3ll3/numba,stonebig/numba,seibert/numba,ssarangi/numba,gmarkall/numba,pitrou/numba,jriehl/numba,pombredanne/numba,numba/numba,IntelLabs/numba,stuartarchibald/numba,cpcloud/numba,gdementen/numba,ssarangi/numba,jriehl/numba,pombredanne/numba,sklam/numba,stonebig/numba,gmarkall/numba,stefanseefeld/numba,pombredanne/numba,stonebig/numba,pitrou/numba,GaZ3ll3/numba,GaZ3ll3/numba,stefanseefeld/numba,numba/numba,seibert/numba,stuartarchibald/numba,gdementen/numba,pitrou/numba,stuartarchibald/numba,IntelLabs/numba,GaZ3ll3/numba,IntelLabs/numba,sklam/numba,cpcloud/numba,numba/numba,seibert/numba,IntelLabs/numba,pombredanne/numba,sklam/numba,sklam/numba,stefanseefeld/numba,gmarkall/numba,stonebig/numba,stefanseefeld/numba,stuartarchibald/numba,gdementen/numba,numba/numba,pitrou/numba,cpcloud/numba,IntelLabs/numba,sklam/numba,seibert/numba,ssarangi/numba,ssarangi/numba,pitrou/numba,jriehl/numba,gdementen/numba,gmarkall/numba,stuartarchibald/numba,jriehl/numba,ssarangi/numba,cpcloud/numba,stefanseefeld/numba
|
Test new language feature in cuda python
|
"""
Test basic language features
"""
from __future__ import print_function, absolute_import, division
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class TestLang(unittest.TestCase):
def test_enumerate(self):
tup = (1., 2.5, 3.)
@cuda.jit("void(float64[:])")
def foo(a):
for i, v in enumerate(tup):
a[i] = v
a = np.zeros(len(tup))
foo(a)
self.assertTrue(np.all(a == tup))
def test_zip(self):
t1 = (1, 2, 3)
t2 = (4.5, 5.6, 6.7)
@cuda.jit("void(float64[:])")
def foo(a):
c = 0
for i, j in zip(t1, t2):
c += i + j
a[0] = c
a = np.zeros(1)
foo(a)
b = np.array(t1)
c = np.array(t2)
self.assertTrue(np.all(a == (b + c).sum()))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test new language feature in cuda python<commit_after>
|
"""
Test basic language features
"""
from __future__ import print_function, absolute_import, division
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class TestLang(unittest.TestCase):
def test_enumerate(self):
tup = (1., 2.5, 3.)
@cuda.jit("void(float64[:])")
def foo(a):
for i, v in enumerate(tup):
a[i] = v
a = np.zeros(len(tup))
foo(a)
self.assertTrue(np.all(a == tup))
def test_zip(self):
t1 = (1, 2, 3)
t2 = (4.5, 5.6, 6.7)
@cuda.jit("void(float64[:])")
def foo(a):
c = 0
for i, j in zip(t1, t2):
c += i + j
a[0] = c
a = np.zeros(1)
foo(a)
b = np.array(t1)
c = np.array(t2)
self.assertTrue(np.all(a == (b + c).sum()))
if __name__ == '__main__':
unittest.main()
|
Test new language feature in cuda python"""
Test basic language features
"""
from __future__ import print_function, absolute_import, division
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class TestLang(unittest.TestCase):
def test_enumerate(self):
tup = (1., 2.5, 3.)
@cuda.jit("void(float64[:])")
def foo(a):
for i, v in enumerate(tup):
a[i] = v
a = np.zeros(len(tup))
foo(a)
self.assertTrue(np.all(a == tup))
def test_zip(self):
t1 = (1, 2, 3)
t2 = (4.5, 5.6, 6.7)
@cuda.jit("void(float64[:])")
def foo(a):
c = 0
for i, j in zip(t1, t2):
c += i + j
a[0] = c
a = np.zeros(1)
foo(a)
b = np.array(t1)
c = np.array(t2)
self.assertTrue(np.all(a == (b + c).sum()))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test new language feature in cuda python<commit_after>"""
Test basic language features
"""
from __future__ import print_function, absolute_import, division
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class TestLang(unittest.TestCase):
def test_enumerate(self):
tup = (1., 2.5, 3.)
@cuda.jit("void(float64[:])")
def foo(a):
for i, v in enumerate(tup):
a[i] = v
a = np.zeros(len(tup))
foo(a)
self.assertTrue(np.all(a == tup))
def test_zip(self):
t1 = (1, 2, 3)
t2 = (4.5, 5.6, 6.7)
@cuda.jit("void(float64[:])")
def foo(a):
c = 0
for i, j in zip(t1, t2):
c += i + j
a[0] = c
a = np.zeros(1)
foo(a)
b = np.array(t1)
c = np.array(t2)
self.assertTrue(np.all(a == (b + c).sum()))
if __name__ == '__main__':
unittest.main()
|
|
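What the test above really exercises is that enumerate and zip over globally captured tuples compile inside a CUDA kernel; the arithmetic itself is trivial. A CPU-only reference for the same assertions, useful as a sanity check before involving a GPU:
# Sketch: plain-Python/NumPy reference for the kernel logic above.
import numpy as np

tup = (1., 2.5, 3.)
a = np.zeros(len(tup))
for i, v in enumerate(tup):
    a[i] = v
assert np.all(a == tup)

t1, t2 = (1, 2, 3), (4.5, 5.6, 6.7)
total = sum(i + j for i, j in zip(t1, t2))
assert np.isclose(total, (np.array(t1) + np.array(t2)).sum())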
b6deb0495f1a31801d95e2d59de7ea2731eb8970
|
home/bin/h2t.py
|
home/bin/h2t.py
|
import argparse
import html2text
import pypandoc
def main():
p = argparse.ArgumentParser()
h = html2text.HTML2Text()
p.add_argument("filename")
filename = p.parse_args().filename
h.ignore_emphasis = True
h.body_width = 0
h.use_automatic_links = False
h.wrap_links = False
h.single_line_break = True
with open(filename, "r") as data:
try:
text = data.read()
except UnicodeDecodeError:
with open(filename, mode="r", encoding="iso-8859-1") as data:
text = data.read()
init = h.handle(text)
print(init)
if __name__ == "__main__":
main()
|
Add html to text script
|
Add html to text script
|
Python
|
mit
|
dahlbaek/Ubuntu-dotfiles
|
Add html to text script
|
import argparse
import html2text
import pypandoc
def main():
p = argparse.ArgumentParser()
h = html2text.HTML2Text()
p.add_argument("filename")
filename = p.parse_args().filename
h.ignore_emphasis = True
h.body_width = 0
h.use_automatic_links = False
h.wrap_links = False
h.single_line_break = True
with open(filename, "r") as data:
try:
text = data.read()
except UnicodeDecodeError:
with open(filename, mode="r", encoding="iso-8859-1") as data:
text = data.read()
init = h.handle(text)
print(init)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add html to text script<commit_after>
|
import argparse
import html2text
import pypandoc
def main():
p = argparse.ArgumentParser()
h = html2text.HTML2Text()
p.add_argument("filename")
filename = p.parse_args().filename
h.ignore_emphasis = True
h.body_width = 0
h.use_automatic_links = False
h.wrap_links = False
h.single_line_break = True
with open(filename, "r") as data:
try:
text = data.read()
except UnicodeDecodeError:
with open(filename, mode="r", encoding="iso-8859-1") as data:
text = data.read()
init = h.handle(text)
print(init)
if __name__ == "__main__":
main()
|
Add html to text scriptimport argparse
import html2text
import pypandoc
def main():
p = argparse.ArgumentParser()
h = html2text.HTML2Text()
p.add_argument("filename")
filename = p.parse_args().filename
h.ignore_emphasis = True
h.body_width = 0
h.use_automatic_links = False
h.wrap_links = False
h.single_line_break = True
with open(filename, "r") as data:
try:
text = data.read()
except UnicodeDecodeError:
with open(filename, mode="r", encoding="iso-8859-1") as data:
text = data.read()
init = h.handle(text)
print(init)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add html to text script<commit_after>import argparse
import html2text
import pypandoc
def main():
p = argparse.ArgumentParser()
h = html2text.HTML2Text()
p.add_argument("filename")
filename = p.parse_args().filename
h.ignore_emphasis = True
h.body_width = 0
h.use_automatic_links = False
h.wrap_links = False
h.single_line_break = True
with open(filename, "r") as data:
try:
text = data.read()
except UnicodeDecodeError:
with open(filename, mode="r", encoding="iso-8859-1") as data:
text = data.read()
init = h.handle(text)
print(init)
if __name__ == "__main__":
main()
|
|
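The encoding fallback in that script can be expressed once up front, and it is worth noting that iso-8859-1 maps every byte, so the fallback read cannot itself fail. A sketch assuming only the html2text package (the attribute names below appear in the record itself):
# Sketch: UTF-8 read with a latin-1 fallback, then HTML-to-text conversion.
import html2text

def read_text(path):
    try:
        with open(path, encoding="utf-8") as fh:
            return fh.read()
    except UnicodeDecodeError:
        with open(path, encoding="iso-8859-1") as fh:  # accepts any byte sequence
            return fh.read()

h = html2text.HTML2Text()
h.body_width = 0          # disable hard wrapping
h.ignore_emphasis = True  # drop *...* / _..._ markers
print(h.handle("<h1>Title</h1><p>Some <em>text</em>.</p>"))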
b73fa8f8daaef9130563cfa2f7d0546c6e393d33
|
tools/dev/wc-ng/graph-data.py
|
tools/dev/wc-ng/graph-data.py
|
#!/usr/bin/env python
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import pylab
import numpy as np
import csv
import sys
min_rev = 35000
data_reader = csv.reader(open('data.csv'))
data = []
for row in data_reader:
row = row[:-1]
if row[0] == 'Revision':
data.append(row)
continue
if int(row[0]) < min_rev:
continue
for i, x in enumerate(row):
if i <= 1:
row[i] = int(row[i])
else:
row[i] = int(row[i-1]) + int(row[i])
data.append(row)
x = [d[0] for d in data[1:]]
data = [d[1:] for d in data]
y = zip(*data)
l = []
for i, foo in enumerate(y):
    ln, = plt.plot(x, foo[1:], linewidth=1)
    l.append(ln)
plt.figlegend(l, data[0], 'lower left')
plt.fill_between(x, 0, y[0][1:], facecolor=l[0].get_color())
#for i in range(0, len(y)-1):
# plt.fill_between(x, y[i][1:], y[i+1][1:])
plt.xlabel('Revision')
plt.ylabel('Symbol Count')
plt.show()
png = open('chart2.png', 'wb')
plt.savefig(png)
|
Add yet another script, this one for doing an area graph of the wc-ng data collected by gather-data.sh.
|
Add yet another script, this one for doing an area graph of the wc-ng data
collected by gather-data.sh.
* tools/dev/wc-ng/graph-data.py:
New.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@880258 13f79535-47bb-0310-9956-ffa450edef68
|
Python
|
apache-2.0
|
YueLinHo/Subversion,YueLinHo/Subversion,wbond/subversion,YueLinHo/Subversion,YueLinHo/Subversion,wbond/subversion,YueLinHo/Subversion,wbond/subversion,YueLinHo/Subversion,wbond/subversion,wbond/subversion,YueLinHo/Subversion,wbond/subversion,wbond/subversion,YueLinHo/Subversion
|
Add yet another script, this one for doing an area graph of the wc-ng data
collected by gather-data.sh.
* tools/dev/wc-ng/graph-data.py:
New.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@880258 13f79535-47bb-0310-9956-ffa450edef68
|
#!/usr/bin/env python
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import pylab
import numpy as np
import csv
import sys
min_rev = 35000
data_reader = csv.reader(open('data.csv'))
data = []
for row in data_reader:
row = row[:-1]
if row[0] == 'Revision':
data.append(row)
continue
if int(row[0]) < min_rev:
continue
for i, x in enumerate(row):
if i <= 1:
row[i] = int(row[i])
else:
row[i] = int(row[i-1]) + int(row[i])
data.append(row)
x = [d[0] for d in data[1:]]
data = [d[1:] for d in data]
y = zip(*data)
l = []
for i, foo in enumerate(y):
ln, = plt.plot(x, foo[1:], linewidth=1)
l.append(ln)
plt.figlegend(l, data[0], 'lower left')
plt.fill_between(x, 0, y[0][1:], facecolor=l[0].get_color())
#for i in range(0, len(y)-1):
# plt.fill_between(x, y[i][1:], y[i+1][1:])
plt.xlabel('Revision')
plt.ylabel('Symbol Count')
plt.savefig('chart2.png')
plt.show()
|
<commit_before><commit_msg>Add yet another script, this one for doing an area graph of the wc-ng data
collected by gather-data.sh.
* tools/dev/wc-ng/graph-data.py:
New.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@880258 13f79535-47bb-0310-9956-ffa450edef68<commit_after>
|
#!/usr/bin/env python
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import pylab
import numpy as np
import csv
import sys
min_rev = 35000
data_reader = csv.reader(open('data.csv'))
data = []
for row in data_reader:
row = row[:-1]
if row[0] == 'Revision':
data.append(row)
continue
if int(row[0]) < min_rev:
continue
for i, x in enumerate(row):
if i <= 1:
row[i] = int(row[i])
else:
row[i] = int(row[i-1]) + int(row[i])
data.append(row)
x = [d[0] for d in data[1:]]
data = [d[1:] for d in data]
y = zip(*data)
l = []
for i, foo in enumerate(y):
ln, = plt.plot(x, foo[1:], linewidth=1)
l.append(ln)
plt.figlegend(l, data[0], 'lower left')
plt.fill_between(x, 0, y[0][1:], facecolor=l[0].get_color())
#for i in range(0, len(y)-1):
# plt.fill_between(x, y[i][1:], y[i+1][1:])
plt.xlabel('Revision')
plt.ylabel('Symbol Count')
plt.savefig('chart2.png')
plt.show()
|
Add yet another script, this one for doing an area graph of the wc-ng data
collected by gather-data.sh.
* tools/dev/wc-ng/graph-data.py:
New.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@880258 13f79535-47bb-0310-9956-ffa450edef68#!/usr/bin/env python
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import pylab
import numpy as np
import csv
import sys
min_rev = 35000
data_reader = csv.reader(open('data.csv'))
data = []
for row in data_reader:
row = row[:-1]
if row[0] == 'Revision':
data.append(row)
continue
if int(row[0]) < min_rev:
continue
for i, x in enumerate(row):
if i <= 1:
row[i] = int(row[i])
else:
row[i] = int(row[i-1]) + int(row[i])
data.append(row)
x = [d[0] for d in data[1:]]
data = [d[1:] for d in data]
y = zip(*data)
l = []
for i, foo in enumerate(y):
ln, = plt.plot(x, foo[1:], linewidth=1)
l.append(ln)
plt.figlegend(l, data[0], 'lower left')
plt.fill_between(x, 0, y[0][1:], facecolor=l[0].get_color())
#for i in range(0, len(y)-1):
# plt.fill_between(x, y[i][1:], y[i+1][1:])
plt.xlabel('Revision')
plt.ylabel('Symbol Count')
plt.savefig('chart2.png')
plt.show()
|
<commit_before><commit_msg>Add yet another script, this one for doing an area graph of the wc-ng data
collected by gather-data.sh.
* tools/dev/wc-ng/graph-data.py:
New.
git-svn-id: f8a4e5e023278da1e04e203c7fe051e3c4285d88@880258 13f79535-47bb-0310-9956-ffa450edef68<commit_after>#!/usr/bin/env python
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib import pylab
import numpy as np
import csv
import sys
min_rev = 35000
data_reader = csv.reader(open('data.csv'))
data = []
for row in data_reader:
row = row[:-1]
if row[0] == 'Revision':
data.append(row)
continue
if int(row[0]) < min_rev:
continue
for i, x in enumerate(row):
if i <= 1:
row[i] = int(row[i])
else:
row[i] = int(row[i-1]) + int(row[i])
data.append(row)
x = [d[0] for d in data[1:]]
data = [d[1:] for d in data]
y = zip(*data)
l = []
for i, foo in enumerate(y):
ln, = plt.plot(x, foo[1:], linewidth=1)
l.append(ln)
plt.figlegend(l, data[0], 'lower left')
plt.fill_between(x, 0, y[0][1:], facecolor=l[0].get_color())
#for i in range(0, len(y)-1):
# plt.fill_between(x, y[i][1:], y[i+1][1:])
plt.xlabel('Revision')
plt.ylabel('Symbol Count')
plt.savefig('chart2.png')
plt.show()
|
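For context, the script above expects a data.csv with a 'Revision' header row and per-column symbol counts; a hedged fixture sketch follows (column names and values are invented, not from the record):
# Hypothetical fixture: write a tiny data.csv the graphing script can parse.
import csv

rows = [
    ["Revision", "total", "svn_wc", "svn_client", ""],  # last column is dropped via row[:-1]
    ["35001", "10", "3", "2", ""],
    ["35002", "12", "4", "2", ""],
]
with open("data.csv", "w", newline="") as f:
    csv.writer(f).writerows(rows)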
|
15916aa477163ce8613aaec61ab2a114018d1071
|
tests/compiler/test_inheritance.py
|
tests/compiler/test_inheritance.py
|
import pytest
from tests.compiler import compile_snippet, A_INST, B_INST, C_INST, compile_base, A_THING, B_THING
from thinglang.compiler.errors import InvalidReference
from thinglang.compiler.opcodes import OpcodePushMember, OpcodeInstantiate, OpcodeReturn, OpcodePushLocal, \
OpcodePopMember
def test_regular_inheritance_member_access():
assert compile_snippet('a_inst.a1') == [
OpcodePushMember(A_INST, 0)
]
assert compile_snippet('b_inst.b1') == [
OpcodePushMember(B_INST, 1)
]
assert compile_snippet('b_inst.a1') == [
OpcodePushMember(B_INST, 0)
]
def test_regular_inheritance_invalid_member_access():
with pytest.raises(InvalidReference):
compile_snippet('a_inst.b1')
with pytest.raises(InvalidReference):
compile_snippet('b_inst.c1')
def test_constructor_compilation():
assert compile_base(thing_id=A_THING) == [
OpcodeInstantiate(0, 1), # Implicit constructor (0-arg), 1 member
OpcodeReturn()
]
assert compile_base(thing_id=B_THING) == [
OpcodeInstantiate(1, 2), # Explicit constructor (1-arg), 2 members (1 own + 1 inherited)
OpcodePushLocal(1), # Push the first argument
OpcodePopMember(0, 1), # Set it as the second member of self
OpcodeReturn()
]
|
Add initial test for inheritance pattern compilation
|
Add initial test for inheritance pattern compilation
|
Python
|
mit
|
ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang
|
Add initial test for inheritance pattern compilation
|
import pytest
from tests.compiler import compile_snippet, A_INST, B_INST, C_INST, compile_base, A_THING, B_THING
from thinglang.compiler.errors import InvalidReference
from thinglang.compiler.opcodes import OpcodePushMember, OpcodeInstantiate, OpcodeReturn, OpcodePushLocal, \
OpcodePopMember
def test_regular_inheritance_member_access():
assert compile_snippet('a_inst.a1') == [
OpcodePushMember(A_INST, 0)
]
assert compile_snippet('b_inst.b1') == [
OpcodePushMember(B_INST, 1)
]
assert compile_snippet('b_inst.a1') == [
OpcodePushMember(B_INST, 0)
]
def test_regular_inheritance_invalid_member_access():
with pytest.raises(InvalidReference):
compile_snippet('a_inst.b1')
with pytest.raises(InvalidReference):
compile_snippet('b_inst.c1')
def test_constructor_compilation():
assert compile_base(thing_id=A_THING) == [
OpcodeInstantiate(0, 1), # Implicit constructor (0-arg), 1 member
OpcodeReturn()
]
assert compile_base(thing_id=B_THING) == [
OpcodeInstantiate(1, 2), # Explicit constructor (1-arg), 2 members (1 own + 1 inherited)
OpcodePushLocal(1), # Push the first argument
OpcodePopMember(0, 1), # Set it as the second member of self
OpcodeReturn()
]
|
<commit_before><commit_msg>Add initial test for inheritance pattern compilation<commit_after>
|
import pytest
from tests.compiler import compile_snippet, A_INST, B_INST, C_INST, compile_base, A_THING, B_THING
from thinglang.compiler.errors import InvalidReference
from thinglang.compiler.opcodes import OpcodePushMember, OpcodeInstantiate, OpcodeReturn, OpcodePushLocal, \
OpcodePopMember
def test_regular_inheritance_member_access():
assert compile_snippet('a_inst.a1') == [
OpcodePushMember(A_INST, 0)
]
assert compile_snippet('b_inst.b1') == [
OpcodePushMember(B_INST, 1)
]
assert compile_snippet('b_inst.a1') == [
OpcodePushMember(B_INST, 0)
]
def test_regular_inheritance_invalid_member_access():
with pytest.raises(InvalidReference):
compile_snippet('a_inst.b1')
with pytest.raises(InvalidReference):
compile_snippet('b_inst.c1')
def test_constructor_compilation():
assert compile_base(thing_id=A_THING) == [
OpcodeInstantiate(0, 1), # Implicit constructor (0-arg), 1 member
OpcodeReturn()
]
assert compile_base(thing_id=B_THING) == [
OpcodeInstantiate(1, 2), # Explicit constructor (1-arg), 2 members (1 own + 1 inherited)
OpcodePushLocal(1), # Push the first argument
OpcodePopMember(0, 1), # Set it as the second member of self
OpcodeReturn()
]
|
Add initial test for inheritance pattern compilationimport pytest
from tests.compiler import compile_snippet, A_INST, B_INST, C_INST, compile_base, A_THING, B_THING
from thinglang.compiler.errors import InvalidReference
from thinglang.compiler.opcodes import OpcodePushMember, OpcodeInstantiate, OpcodeReturn, OpcodePushLocal, \
OpcodePopMember
def test_regular_inheritance_member_access():
assert compile_snippet('a_inst.a1') == [
OpcodePushMember(A_INST, 0)
]
assert compile_snippet('b_inst.b1') == [
OpcodePushMember(B_INST, 1)
]
assert compile_snippet('b_inst.a1') == [
OpcodePushMember(B_INST, 0)
]
def test_regular_inheritance_invalid_member_access():
with pytest.raises(InvalidReference):
compile_snippet('a_inst.b1')
with pytest.raises(InvalidReference):
compile_snippet('b_inst.c1')
def test_constructor_compilation():
assert compile_base(thing_id=A_THING) == [
OpcodeInstantiate(0, 1), # Implicit constructor (0-arg), 1 member
OpcodeReturn()
]
assert compile_base(thing_id=B_THING) == [
OpcodeInstantiate(1, 2), # Explicit constructor (1-arg), 2 members (1 own + 1 inherited)
OpcodePushLocal(1), # Push the first argument
OpcodePopMember(0, 1), # Set it as the second member of self
OpcodeReturn()
]
|
<commit_before><commit_msg>Add initial test for inheritance pattern compilation<commit_after>import pytest
from tests.compiler import compile_snippet, A_INST, B_INST, C_INST, compile_base, A_THING, B_THING
from thinglang.compiler.errors import InvalidReference
from thinglang.compiler.opcodes import OpcodePushMember, OpcodeInstantiate, OpcodeReturn, OpcodePushLocal, \
OpcodePopMember
def test_regular_inheritance_member_access():
assert compile_snippet('a_inst.a1') == [
OpcodePushMember(A_INST, 0)
]
assert compile_snippet('b_inst.b1') == [
OpcodePushMember(B_INST, 1)
]
assert compile_snippet('b_inst.a1') == [
OpcodePushMember(B_INST, 0)
]
def test_regular_inheritance_invalid_member_access():
with pytest.raises(InvalidReference):
compile_snippet('a_inst.b1')
with pytest.raises(InvalidReference):
compile_snippet('b_inst.c1')
def test_constructor_compilation():
assert compile_base(thing_id=A_THING) == [
OpcodeInstantiate(0, 1), # Implicit constructor (0-arg), 1 member
OpcodeReturn()
]
assert compile_base(thing_id=B_THING) == [
OpcodeInstantiate(1, 2), # Explicit constructor (1-arg), 2 members (1 own + 1 inherited)
OpcodePushLocal(1), # Push the first argument
OpcodePopMember(0, 1), # Set it as the second member of self
OpcodeReturn()
]
|
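As a usage note, a hedged sketch of collecting just this module with pytest (the path mirrors the record's file name):
# Hypothetical: run only the inheritance tests; equivalent to
# `pytest -q tests/compiler/test_inheritance.py` on the command line.
import pytest

raise SystemExit(pytest.main(["-q", "tests/compiler/test_inheritance.py"]))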
|
edab1cbfb3aada1378e1b248075909a2ba717efa
|
rtei/migrations/0003_rteidocument_file_size.py
|
rtei/migrations/0003_rteidocument_file_size.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-11 08:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rtei', '0002_auto_20190109_1413'),
]
operations = [
migrations.AddField(
model_name='rteidocument',
name='file_size',
field=models.PositiveIntegerField(editable=False, null=True),
),
]
|
Add rteidocument file size migration
|
Add rteidocument file size migration
(apply 3rd)
|
Python
|
agpl-3.0
|
okfn/rtei,okfn/rtei,okfn/rtei,okfn/rtei
|
Add rteidocument file size migration
(apply 3rd)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-11 08:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rtei', '0002_auto_20190109_1413'),
]
operations = [
migrations.AddField(
model_name='rteidocument',
name='file_size',
field=models.PositiveIntegerField(editable=False, null=True),
),
]
|
<commit_before><commit_msg>Add rteidocument file size migration
(apply 3rd)<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-11 08:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rtei', '0002_auto_20190109_1413'),
]
operations = [
migrations.AddField(
model_name='rteidocument',
name='file_size',
field=models.PositiveIntegerField(editable=False, null=True),
),
]
|
Add rteidocument file size migration
(apply 3rd)# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-11 08:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rtei', '0002_auto_20190109_1413'),
]
operations = [
migrations.AddField(
model_name='rteidocument',
name='file_size',
field=models.PositiveIntegerField(editable=False, null=True),
),
]
|
<commit_before><commit_msg>Add rteidocument file size migration
(apply 3rd)<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-11 08:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rtei', '0002_auto_20190109_1413'),
]
operations = [
migrations.AddField(
model_name='rteidocument',
name='file_size',
field=models.PositiveIntegerField(editable=False, null=True),
),
]
|
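A hedged sketch of applying the migration above outside manage.py; the settings module name is an assumption:
# Hypothetical: apply rtei migrations up to 0003 programmatically.
import os

import django
from django.core.management import call_command

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rtei.settings")  # assumed path
django.setup()
call_command("migrate", "rtei", "0003")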
|
fe5d8c87f976d1d73f26f713919193da21a0ae84
|
ntm/memory.py
|
ntm/memory.py
|
import theano
import theano.tensor as T
import numpy as np
import lasagne.init  # needed for the lasagne.init.Constant default below
from lasagne.layers import InputLayer
class Memory(InputLayer):
"""
docstring for Memory
"""
def __init__(self, shape,
memory_init=lasagne.init.Constant(0.),
learn_init=True,
**kwargs):
super(Memory, self).__init__(shape, **kwargs)
self.memory_init = self.add_param(
memory_init, shape,
name=self.name, trainable=learn_init, regularizable=False)
|
Add Memory component, inheriting from InputLayer
|
Add Memory component, inheriting from InputLayer
|
Python
|
mit
|
snipsco/ntm-lasagne
|
Add Memory component, inheriting from InputLayer
|
import theano
import theano.tensor as T
import numpy as np
import lasagne.init  # needed for the lasagne.init.Constant default below
from lasagne.layers import InputLayer
class Memory(InputLayer):
"""
docstring for Memory
"""
def __init__(self, shape,
memory_init=lasagne.init.Constant(0.),
learn_init=True,
**kwargs):
super(Memory, self).__init__(shape, **kwargs)
self.memory_init = self.add_param(
memory_init, shape,
name=self.name, trainable=learn_init, regularizable=False)
|
<commit_before><commit_msg>Add Memory component, inheriting from InputLayer<commit_after>
|
import theano
import theano.tensor as T
import numpy as np
import lasagne.init  # needed for the lasagne.init.Constant default below
from lasagne.layers import InputLayer
class Memory(InputLayer):
"""
docstring for Memory
"""
def __init__(self, shape,
memory_init=lasagne.init.Constant(0.),
learn_init=True,
**kwargs):
super(Memory, self).__init__(shape, **kwargs)
self.memory_init = self.add_param(
memory_init, shape,
name=self.name, trainable=learn_init, regularizable=False)
|
Add Memory component, inheriting from InputLayerimport theano
import theano.tensor as T
import numpy as np
import lasagne.init  # needed for the lasagne.init.Constant default below
from lasagne.layers import InputLayer
class Memory(InputLayer):
"""
docstring for Memory
"""
def __init__(self, shape,
memory_init=lasagne.init.Constant(0.),
learn_init=True,
**kwargs):
super(Memory, self).__init__(shape, **kwargs)
self.memory_init = self.add_param(
memory_init, shape,
name=self.name, trainable=learn_init, regularizable=False)
|
<commit_before><commit_msg>Add Memory component, inheriting from InputLayer<commit_after>import theano
import theano.tensor as T
import numpy as np
import lasagne.init  # needed for the lasagne.init.Constant default below
from lasagne.layers import InputLayer
class Memory(InputLayer):
"""
docstring for Memory
"""
def __init__(self, shape,
memory_init=lasagne.init.Constant(0.),
learn_init=True,
**kwargs):
super(Memory, self).__init__(shape, **kwargs)
self.memory_init = self.add_param(
memory_init, shape,
name=self.name, trainable=learn_init, regularizable=False)
|
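A minimal construction sketch for the Memory layer above (shape and name are illustrative; assumes Lasagne is installed and the import fix noted in the code):
# Hypothetical usage: build a 128x20 memory bank with a learnable init.
import lasagne.init

from ntm.memory import Memory  # assumes the package is importable

memory = Memory((128, 20), memory_init=lasagne.init.Constant(1e-6),
                learn_init=True, name="M")
print(memory.memory_init)  # the shared variable registered via add_param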
|
d7b98f9e77caef34cff9fc7de19ac4980abb9d38
|
flaskext/debugtoolbar/utils.py
|
flaskext/debugtoolbar/utils.py
|
import os.path
import sys
from flask import current_app
def format_fname(value):
# If the value is not an absolute path, then it is a builtin or
# a relative file (thus a project file).
if not os.path.isabs(value):
if value.startswith(('{', '<')):
return value
if value.startswith('.' + os.path.sep):
return value
return '.' + os.path.sep + value
# If the file is absolute and within the project root handle it as
# a project file
if value.startswith(current_app.root_path):
return "." + value[len(current_app.root_path):]
# Loop through sys.path to find the longest match and return
# the relative path from there.
paths = sys.path
prefix = None
prefix_len = 0
for path in sys.path:
new_prefix = os.path.commonprefix([path, value])
if len(new_prefix) > prefix_len:
prefix = new_prefix
prefix_len = len(prefix)
if not prefix.endswith(os.path.sep):
prefix_len -= 1
path = value[prefix_len:]
return '<%s>' % path
|
Add a format_fname function which translates an absolute filename to a filename relative to the sys.path or relative to the project root
|
Add a format_fname function which translates an absolute filename to a filename relative to the sys.path or relative to the project root
|
Python
|
bsd-3-clause
|
lepture/flask-debugtoolbar,dianchang/flask-debugtoolbar,lepture/flask-debugtoolbar,dianchang/flask-debugtoolbar,dianchang/flask-debugtoolbar
|
Add a format_fname function which translates an absolute filename to a filename relative to the sys.path or relative to the project root
|
import os.path
import sys
from flask import current_app
def format_fname(value):
# If the value is not an absolute path, then it is a builtin or
# a relative file (thus a project file).
if not os.path.isabs(value):
if value.startswith(('{', '<')):
return value
if value.startswith('.' + os.path.sep):
return value
return '.' + os.path.sep + value
# If the file is absolute and within the project root handle it as
# a project file
if value.startswith(current_app.root_path):
return "." + value[len(current_app.root_path):]
# Loop through sys.path to find the longest match and return
# the relative path from there.
paths = sys.path
prefix = None
prefix_len = 0
for path in sys.path:
new_prefix = os.path.commonprefix([path, value])
if len(new_prefix) > prefix_len:
prefix = new_prefix
prefix_len = len(prefix)
if not prefix.endswith(os.path.sep):
prefix_len -= 1
path = value[prefix_len:]
return '<%s>' % path
|
<commit_before><commit_msg>Add a format_fname function which translates an absolute filename to a filename relative to the sys.path or relative to the project root<commit_after>
|
import os.path
import sys
from flask import current_app
def format_fname(value):
# If the value is not an absolute path, then it is a builtin or
# a relative file (thus a project file).
if not os.path.isabs(value):
if value.startswith(('{', '<')):
return value
if value.startswith('.' + os.path.sep):
return value
return '.' + os.path.sep + value
# If the file is absolute and within the project root handle it as
# a project file
if value.startswith(current_app.root_path):
return "." + value[len(current_app.root_path):]
# Loop through sys.path to find the longest match and return
# the relative path from there.
paths = sys.path
prefix = None
prefix_len = 0
for path in sys.path:
new_prefix = os.path.commonprefix([path, value])
if len(new_prefix) > prefix_len:
prefix = new_prefix
prefix_len = len(prefix)
if not prefix.endswith(os.path.sep):
prefix_len -= 1
path = value[prefix_len:]
return '<%s>' % path
|
Add a format_fname function which translates an absolute filename to a filename relative to the sys.path or relative to the project rootimport os.path
import sys
from flask import current_app
def format_fname(value):
# If the value is not an absolute path, then it is a builtin or
# a relative file (thus a project file).
if not os.path.isabs(value):
if value.startswith(('{', '<')):
return value
if value.startswith('.' + os.path.sep):
return value
return '.' + os.path.sep + value
# If the file is absolute and within the project root handle it as
# a project file
if value.startswith(current_app.root_path):
return "." + value[len(current_app.root_path):]
# Loop through sys.path to find the longest match and return
# the relative path from there.
paths = sys.path
prefix = None
prefix_len = 0
for path in sys.path:
new_prefix = os.path.commonprefix([path, value])
if len(new_prefix) > prefix_len:
prefix = new_prefix
prefix_len = len(prefix)
if not prefix.endswith(os.path.sep):
prefix_len -= 1
path = value[prefix_len:]
return '<%s>' % path
|
<commit_before><commit_msg>Add a format_fname function which translates an absolute filename to a filename relative to the sys.path or relative to the project root<commit_after>import os.path
import sys
from flask import current_app
def format_fname(value):
# If the value is not an absolute path, then it is a builtin or
# a relative file (thus a project file).
if not os.path.isabs(value):
if value.startswith(('{', '<')):
return value
if value.startswith('.' + os.path.sep):
return value
return '.' + os.path.sep + value
# If the file is absolute and within the project root handle it as
# a project file
if value.startswith(current_app.root_path):
return "." + value[len(current_app.root_path):]
# Loop through sys.path to find the longest match and return
# the relative path from there.
paths = sys.path
prefix = None
prefix_len = 0
for path in sys.path:
new_prefix = os.path.commonprefix([path, value])
if len(new_prefix) > prefix_len:
prefix = new_prefix
prefix_len = len(prefix)
if not prefix.endswith(os.path.sep):
prefix_len -= 1
path = value[prefix_len:]
return '<%s>' % path
|
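A hedged demo of format_fname; current_app requires an application context, so the sketch wraps the calls in one:
# Hypothetical demo: format_fname needs an active Flask app context.
from flask import Flask

from flaskext.debugtoolbar.utils import format_fname

app = Flask(__name__)
with app.app_context():
    print(format_fname(__file__))    # rewritten relative to the project root
    print(format_fname("<string>"))  # builtin-style names pass through as-is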
|
50f837a89a2303d89a7831e0cf5a4afc99eb7026
|
foyer/tests/test_xml_writer.py
|
foyer/tests/test_xml_writer.py
|
import glob
import itertools as it
import os
import parmed as pmd
from pkg_resources import resource_filename
import pytest
from foyer import Forcefield
from foyer.tests.utils import atomtype
from foyer.xml_writer import write_foyer
def test_write_xml(filename, ff_file):
structure = pmd.loadfile(filename)
forcefield = Forcefield(ff_file)
structure.write_foyer('test.xml', forcefield=forcefield)
def test_load_xml():
structure = pmd.loadfile(filename)
forcefield = Forcefield(ff_file)
structure.write_foyer('test.xml', forcefield=forcefield)
generated_ff = Forcefield('text.xml')
|
Add nonworking tests for xml writer
|
Add nonworking tests for xml writer
|
Python
|
mit
|
iModels/foyer,mosdef-hub/foyer,iModels/foyer,mosdef-hub/foyer
|
Add nonworking tests for xml writer
|
import glob
import itertools as it
import os
import parmed as pmd
from pkg_resources import resource_filename
import pytest
from foyer import Forcefield
from foyer.tests.utils import atomtype
from foyer.xml_writer import write_foyer
def test_write_xml(filename, ff_file):
structure = pmd.loadfile(filename)
forcefield = Forcefield(ff_file)
structure.write_foyer('test.xml', forcefield=forcefield)
def test_load_xml():
structure = pmd.loadfile(filename)
forcefield = Forcefield(ff_file)
structure.write_foyer('test.xml', forcefield=forcefield)
generated_ff = Forcefield('text.xml')
|
<commit_before><commit_msg>Add nonworking tests for xml writer<commit_after>
|
import glob
import itertools as it
import os
import parmed as pmd
from pkg_resources import resource_filename
import pytest
from foyer import Forcefield
from foyer.tests.utils import atomtype
from foyer.xml_writer import write_foyer
def test_write_xml(filename, ff_file):
structure = pmd.loadfile(filename)
forcefield = Forcefield(ff_file)
structure.write_foyer('test.xml', forcefield=forcefield)
def test_load_xml():
structure = pmd.loadfile(filename)
forcefield = Forcefield(ff_file)
structure.write_foyer('test.xml', forcefield=forcefield)
generated_ff = Forcefield('text.xml')
|
Add nonworking tests for xml writerimport glob
import itertools as it
import os
import parmed as pmd
from pkg_resources import resource_filename
import pytest
from foyer import Forcefield
from foyer.tests.utils import atomtype
from foyer.xml_writer import write_foyer
def test_write_xml(filename, ff_file):
structure = pmd.loadfile(filename)
forcefield = Forcefield(ff_file)
structure.write_foyer('test.xml', forcefield=forcefield)
def test_load_xml():
structure = pmd.loadfile(filename)
forcefield = Forcefield(ff_file)
structure.write_foyer('test.xml', forcefield=forcefield)
generated_ff = Forcefield('text.xml')
|
<commit_before><commit_msg>Add nonworking tests for xml writer<commit_after>import glob
import itertools as it
import os
import parmed as pmd
from pkg_resources import resource_filename
import pytest
from foyer import Forcefield
from foyer.tests.utils import atomtype
from foyer.xml_writer import write_foyer
def test_write_xml(filename, ff_file):
structure = pmd.loadfile(filename)
forcefield = Forcefield(ff_file)
structure.write_foyer('test.xml', forcefield=forcefield)
def test_load_xml():
structure = pmd.loadfile(filename)
forcefield = Forcefield(ff_file)
structure.write_foyer('test.xml', forcefield=forcefield)
generated_ff = Forcefield('text.xml')
|
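Since the tests above are explicitly nonworking (filename and ff_file are never defined), a hedged sketch of one way they might later be parametrized; the file names are placeholders, not foyer fixtures:
# Hypothetical parametrization to supply the missing arguments later.
import pytest

@pytest.mark.parametrize("filename,ff_file", [
    ("ethane.mol2", "oplsaa.xml"),  # placeholder paths only
])
def test_write_xml(filename, ff_file):
    ...  # body as in the record above, once real files are wired in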
|
1670ec1055ef6c957a319e683104081afd8db718
|
python-example/test_check_stickers.py
|
python-example/test_check_stickers.py
|
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
wd.implicitly_wait(2)
request.addfinalizer(wd.quit)
return wd
def test_check_stickers(driver):
driver.get("http://localhost/litecart/")
driver.find_element(By.XPATH, "//div[@class='content']//a[contains(., 'Rubber Ducks')]").click()
all_ducks = driver.find_elements(By.CSS_SELECTOR, '.col-xs-halfs.col-sm-thirds.col-md-fourths.col-lg-fifths')
for duck in all_ducks:
try:
sticker = duck.find_element(By.CSS_SELECTOR, '.sticker')
sticker_type = sticker.get_attribute("textContent")
print("Sticker '" + sticker_type + "' is detected")
assert len(duck.find_elements(By.CSS_SELECTOR, '.sticker')) == 1
print("This sticker is only one for this duck")
except NoSuchElementException:
print("I do not find any stickers. That's why test is fail.")
assert False
|
Check stickers (find elements, locators, wait)
|
Check stickers (find elements, locators, wait)
|
Python
|
apache-2.0
|
eugene-petrash/selenium-webdriver-full-tutorial,eugene-petrash/selenium-webdriver-full-tutorial
|
Check stickers (find elements, locators, wait)
|
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
wd.implicitly_wait(2)
request.addfinalizer(wd.quit)
return wd
def test_check_stickers(driver):
driver.get("http://localhost/litecart/")
driver.find_element(By.XPATH, "//div[@class='content']//a[contains(., 'Rubber Ducks')]").click()
all_ducks = driver.find_elements(By.CSS_SELECTOR, '.col-xs-halfs.col-sm-thirds.col-md-fourths.col-lg-fifths')
for duck in all_ducks:
try:
sticker = duck.find_element(By.CSS_SELECTOR, '.sticker')
sticker_type = sticker.get_attribute("textContent")
print("Sticker '" + sticker_type + "' is detected")
assert len(duck.find_elements(By.CSS_SELECTOR, '.sticker')) == 1
print("This sticker is only one for this duck")
except NoSuchElementException:
print("I do not find any stickers. That's why test is fail.")
assert False
|
<commit_before><commit_msg>Check stickers (find elements, locators, wait)<commit_after>
|
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
wd.implicitly_wait(2)
request.addfinalizer(wd.quit)
return wd
def test_check_stickers(driver):
driver.get("http://localhost/litecart/")
driver.find_element(By.XPATH, "//div[@class='content']//a[contains(., 'Rubber Ducks')]").click()
all_ducks = driver.find_elements(By.CSS_SELECTOR, '.col-xs-halfs.col-sm-thirds.col-md-fourths.col-lg-fifths')
for duck in all_ducks:
try:
sticker = duck.find_element(By.CSS_SELECTOR, '.sticker')
sticker_type = sticker.get_attribute("textContent")
print("Sticker '" + sticker_type + "' is detected")
assert len(duck.find_elements(By.CSS_SELECTOR, '.sticker')) == 1
print("This sticker is only one for this duck")
except NoSuchElementException:
print("I do not find any stickers. That's why test is fail.")
assert False
|
Check stickers (find elements, locators, wait)import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
wd.implicitly_wait(2)
request.addfinalizer(wd.quit)
return wd
def test_check_stickers(driver):
driver.get("http://localhost/litecart/")
driver.find_element(By.XPATH, "//div[@class='content']//a[contains(., 'Rubber Ducks')]").click()
all_ducks = driver.find_elements(By.CSS_SELECTOR, '.col-xs-halfs.col-sm-thirds.col-md-fourths.col-lg-fifths')
for duck in all_ducks:
try:
sticker = duck.find_element(By.CSS_SELECTOR, '.sticker')
sticker_type = sticker.get_attribute("textContent")
print("Sticker '" + sticker_type + "' is detected")
assert len(duck.find_elements(By.CSS_SELECTOR, '.sticker')) == 1
print("This sticker is only one for this duck")
except NoSuchElementException:
print("I do not find any stickers. That's why test is fail.")
assert False
|
<commit_before><commit_msg>Check stickers (find elements, locators, wait)<commit_after>import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
wd.implicitly_wait(2)
request.addfinalizer(wd.quit)
return wd
def test_check_stickers(driver):
driver.get("http://localhost/litecart/")
driver.find_element(By.XPATH, "//div[@class='content']//a[contains(., 'Rubber Ducks')]").click()
all_ducks = driver.find_elements(By.CSS_SELECTOR, '.col-xs-halfs.col-sm-thirds.col-md-fourths.col-lg-fifths')
for duck in all_ducks:
try:
sticker = duck.find_element(By.CSS_SELECTOR, '.sticker')
sticker_type = sticker.get_attribute("textContent")
print("Sticker '" + sticker_type + "' is detected")
assert len(duck.find_elements(By.CSS_SELECTOR, '.sticker')) == 1
print("This sticker is only one for this duck")
except NoSuchElementException:
print("I do not find any stickers. That's why test is fail.")
assert False
|
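As a usage note, a hedged variant of the fixture above for headless CI runs (option names follow the standard Selenium Chrome API):
# Hypothetical headless twist on the driver fixture above.
from selenium import webdriver

options = webdriver.ChromeOptions()
options.add_argument("--headless")
wd = webdriver.Chrome(options=options)
wd.implicitly_wait(2)
wd.quit()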
|
ced1e16a316fb6f5b22f5da7bb130faf1477b400
|
tests/stdout_test.py
|
tests/stdout_test.py
|
from nose.tools import *
import beastling.beastxml
import beastling.configuration
def test_extractor():
config = beastling.configuration.Configuration(configfile="tests/configs/basic.conf")
config.process()
xml = beastling.beastxml.BeastXml(config)
xml.write_file("stdout")
|
Test writing XML to stdout (currently failing in py3).
|
Test writing XML to stdout (currently failing in py3).
|
Python
|
bsd-2-clause
|
lmaurits/BEASTling
|
Test writing XML to stdout (currently failing in py3).
|
from nose.tools import *
import beastling.beastxml
import beastling.configuration
def test_extractor():
config = beastling.configuration.Configuration(configfile="tests/configs/basic.conf")
config.process()
xml = beastling.beastxml.BeastXml(config)
xml.write_file("stdout")
|
<commit_before><commit_msg>Test writing XML to stdout (currently failing in py3).<commit_after>
|
from nose.tools import *
import beastling.beastxml
import beastling.configuration
def test_extractor():
config = beastling.configuration.Configuration(configfile="tests/configs/basic.conf")
config.process()
xml = beastling.beastxml.BeastXml(config)
xml.write_file("stdout")
|
Test writing XML to stdout (currently failing in py3).from nose.tools import *
import beastling.beastxml
import beastling.configuration
def test_extractor():
config = beastling.configuration.Configuration(configfile="tests/configs/basic.conf")
config.process()
xml = beastling.beastxml.BeastXml(config)
xml.write_file("stdout")
|
<commit_before><commit_msg>Test writing XML to stdout (currently failing in py3).<commit_after>from nose.tools import *
import beastling.beastxml
import beastling.configuration
def test_extractor():
config = beastling.configuration.Configuration(configfile="tests/configs/basic.conf")
config.process()
xml = beastling.beastxml.BeastXml(config)
xml.write_file("stdout")
|
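A hedged sketch of running this single test under both interpreters to reproduce the py3 failure; assumes python2, python3, and nose are all on PATH:
# Hypothetical: exercise the stdout test on both Python major versions.
import subprocess

for python in ("python2", "python3"):
    subprocess.call([python, "-m", "nose", "tests/stdout_test.py"])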
|
1fa0385bc8f0fc21233005c611b169fa5b95f993
|
tools/diagnose-me.py
|
tools/diagnose-me.py
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Diagnose some common system configuration problems on Linux, and
suggest fixes."""
import subprocess
import sys
all_checks = []
def Check(name):
"""Decorator that defines a diagnostic check."""
def wrap(func):
all_checks.append((name, func))
return func
return wrap
@Check("/usr/bin/ld is not gold")
def CheckSystemLd():
proc = subprocess.Popen(['/usr/bin/ld', '-v'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
if 'GNU gold' in stdout:
return ("When /usr/bin/ld is gold, system updates can silently\n"
"corrupt your graphics drivers.\n"
"Try 'sudo apt-get remove binutils-gold'.\n")
return None
@Check("random lds are not in the $PATH")
def CheckPathLd():
proc = subprocess.Popen(['which', '-a', 'ld'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
instances = stdout.split()
if len(instances) > 1:
return ("You have multiple 'ld' binaries in your $PATH:\n"
+ '\n'.join(' - ' + i for i in instances) + "\n"
"You should delete all of them but your system one.\n"
"gold is hooked into your build via gyp.\n")
return None
def RunChecks():
for name, check in all_checks:
sys.stdout.write("* Checking %s: " % name)
sys.stdout.flush()
error = check()
if not error:
print "ok"
else:
print "FAIL"
print error
if __name__ == '__main__':
RunChecks()
|
Check in a script to diagnose common system configuration problems.
|
Check in a script to diagnose common system configuration problems.
Review URL: https://chromiumcodereview.appspot.com/9309011
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@120430 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
gavinp/chromium,gavinp/chromium,yitian134/chromium,yitian134/chromium,adobe/chromium,adobe/chromium,gavinp/chromium,yitian134/chromium,yitian134/chromium,adobe/chromium,adobe/chromium,ropik/chromium,adobe/chromium,ropik/chromium,adobe/chromium,yitian134/chromium,yitian134/chromium,adobe/chromium,gavinp/chromium,ropik/chromium,yitian134/chromium,gavinp/chromium,ropik/chromium,adobe/chromium,ropik/chromium,yitian134/chromium,gavinp/chromium,gavinp/chromium,gavinp/chromium,yitian134/chromium,ropik/chromium,ropik/chromium,adobe/chromium,gavinp/chromium,ropik/chromium,gavinp/chromium,ropik/chromium,yitian134/chromium,adobe/chromium,adobe/chromium
|
Check in a script to diagnose common system configuration problems.
Review URL: https://chromiumcodereview.appspot.com/9309011
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@120430 0039d316-1c4b-4281-b951-d872f2087c98
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Diagnose some common system configuration problems on Linux, and
suggest fixes."""
import subprocess
import sys
all_checks = []
def Check(name):
"""Decorator that defines a diagnostic check."""
def wrap(func):
all_checks.append((name, func))
return func
return wrap
@Check("/usr/bin/ld is not gold")
def CheckSystemLd():
proc = subprocess.Popen(['/usr/bin/ld', '-v'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
if 'GNU gold' in stdout:
return ("When /usr/bin/ld is gold, system updates can silently\n"
"corrupt your graphics drivers.\n"
"Try 'sudo apt-get remove binutils-gold'.\n")
return None
@Check("random lds are not in the $PATH")
def CheckPathLd():
proc = subprocess.Popen(['which', '-a', 'ld'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
instances = stdout.split()
if len(instances) > 1:
return ("You have multiple 'ld' binaries in your $PATH:\n"
+ '\n'.join(' - ' + i for i in instances) + "\n"
"You should delete all of them but your system one.\n"
"gold is hooked into your build via gyp.\n")
return None
def RunChecks():
for name, check in all_checks:
sys.stdout.write("* Checking %s: " % name)
sys.stdout.flush()
error = check()
if not error:
print "ok"
else:
print "FAIL"
print error
if __name__ == '__main__':
RunChecks()
|
<commit_before><commit_msg>Check in a script to diagnose common system configuration problems.
Review URL: https://chromiumcodereview.appspot.com/9309011
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@120430 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Diagnose some common system configuration problems on Linux, and
suggest fixes."""
import subprocess
import sys
all_checks = []
def Check(name):
"""Decorator that defines a diagnostic check."""
def wrap(func):
all_checks.append((name, func))
return func
return wrap
@Check("/usr/bin/ld is not gold")
def CheckSystemLd():
proc = subprocess.Popen(['/usr/bin/ld', '-v'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
if 'GNU gold' in stdout:
return ("When /usr/bin/ld is gold, system updates can silently\n"
"corrupt your graphics drivers.\n"
"Try 'sudo apt-get remove binutils-gold'.\n")
return None
@Check("random lds are not in the $PATH")
def CheckPathLd():
proc = subprocess.Popen(['which', '-a', 'ld'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
instances = stdout.split()
if len(instances) > 1:
return ("You have multiple 'ld' binaries in your $PATH:\n"
+ '\n'.join(' - ' + i for i in instances) + "\n"
"You should delete all of them but your system one.\n"
"gold is hooked into your build via gyp.\n")
return None
def RunChecks():
for name, check in all_checks:
sys.stdout.write("* Checking %s: " % name)
sys.stdout.flush()
error = check()
if not error:
print "ok"
else:
print "FAIL"
print error
if __name__ == '__main__':
RunChecks()
|
Check in a script to diagnose common system configuration problems.
Review URL: https://chromiumcodereview.appspot.com/9309011
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@120430 0039d316-1c4b-4281-b951-d872f2087c98#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Diagnose some common system configuration problems on Linux, and
suggest fixes."""
import subprocess
import sys
all_checks = []
def Check(name):
"""Decorator that defines a diagnostic check."""
def wrap(func):
all_checks.append((name, func))
return func
return wrap
@Check("/usr/bin/ld is not gold")
def CheckSystemLd():
proc = subprocess.Popen(['/usr/bin/ld', '-v'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
if 'GNU gold' in stdout:
return ("When /usr/bin/ld is gold, system updates can silently\n"
"corrupt your graphics drivers.\n"
"Try 'sudo apt-get remove binutils-gold'.\n")
return None
@Check("random lds are not in the $PATH")
def CheckPathLd():
proc = subprocess.Popen(['which', '-a', 'ld'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
instances = stdout.split()
if len(instances) > 1:
return ("You have multiple 'ld' binaries in your $PATH:\n"
+ '\n'.join(' - ' + i for i in instances) + "\n"
"You should delete all of them but your system one.\n"
"gold is hooked into your build via gyp.\n")
return None
def RunChecks():
for name, check in all_checks:
sys.stdout.write("* Checking %s: " % name)
sys.stdout.flush()
error = check()
if not error:
print "ok"
else:
print "FAIL"
print error
if __name__ == '__main__':
RunChecks()
|
<commit_before><commit_msg>Check in a script to diagnose common system configuration problems.
Review URL: https://chromiumcodereview.appspot.com/9309011
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@120430 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>#!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Diagnose some common system configuration problems on Linux, and
suggest fixes."""
import subprocess
import sys
all_checks = []
def Check(name):
"""Decorator that defines a diagnostic check."""
def wrap(func):
all_checks.append((name, func))
return func
return wrap
@Check("/usr/bin/ld is not gold")
def CheckSystemLd():
proc = subprocess.Popen(['/usr/bin/ld', '-v'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
if 'GNU gold' in stdout:
return ("When /usr/bin/ld is gold, system updates can silently\n"
"corrupt your graphics drivers.\n"
"Try 'sudo apt-get remove binutils-gold'.\n")
return None
@Check("random lds are not in the $PATH")
def CheckPathLd():
proc = subprocess.Popen(['which', '-a', 'ld'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
instances = stdout.split()
if len(instances) > 1:
return ("You have multiple 'ld' binaries in your $PATH:\n"
+ '\n'.join(' - ' + i for i in instances) + "\n"
"You should delete all of them but your system one.\n"
"gold is hooked into your build via gyp.\n")
return None
def RunChecks():
for name, check in all_checks:
sys.stdout.write("* Checking %s: " % name)
sys.stdout.flush()
error = check()
if not error:
print "ok"
else:
print "FAIL"
print error
if __name__ == '__main__':
RunChecks()
|
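As a usage illustration, new diagnostics hook into the script above via the Check decorator; a hedged sketch of an extra check (the check itself is invented and would be appended to the script, which already imports subprocess):
# Hypothetical extra diagnostic, written against the script's Check decorator.
@Check("ccache is installed")
def CheckCcache():
    proc = subprocess.Popen(['which', 'ccache'], stdout=subprocess.PIPE)
    stdout = proc.communicate()[0]
    if not stdout.strip():
        return "ccache was not found; consider installing it for faster builds.\n"
    return None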
|
35b67e00e378df43a657c56bd751e7059ef45c38
|
benchexec/tools/witnesslint.py
|
benchexec/tools/witnesslint.py
|
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for the witness checker (witnesslint)
(https://github.com/sosy-lab/sv-witnesses)
"""
def executable(self, tool_locator):
return tool_locator.find_executable("witnesslint.py")
def name(self):
return "witnesslint"
def determine_result(self, run):
if run.exit_code.value == 0:
return result.RESULT_TRUE_PROP
else:
return result.RESULT_FALSE_PROP
|
Add tool info for the witness linter.
|
Add tool info for the witness linter.
|
Python
|
apache-2.0
|
ultimate-pa/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec
|
Add tool info for the witness linter.
|
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for the witness checker (witnesslint)
(https://github.com/sosy-lab/sv-witnesses)
"""
def executable(self, tool_locator):
return tool_locator.find_executable("witnesslint.py")
def name(self):
return "witnesslint"
def determine_result(self, run):
if run.exit_code.value == 0:
return result.RESULT_TRUE_PROP
else:
return result.RESULT_FALSE_PROP
|
<commit_before><commit_msg>Add tool info for the witness linter.<commit_after>
|
import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for the witness checker (witnesslint)
(https://github.com/sosy-lab/sv-witnesses)
"""
def executable(self, tool_locator):
return tool_locator.find_executable("witnesslint.py")
def name(self):
return "witnesslint"
def determine_result(self, run):
if run.exit_code.value == 0:
return result.RESULT_TRUE_PROP
else:
return result.RESULT_FALSE_PROP
|
Add tool info for the witness linter.import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for the witness checker (witnesslint)
(https://github.com/sosy-lab/sv-witnesses)
"""
def executable(self, tool_locator):
return tool_locator.find_executable("witnesslint.py")
def name(self):
return "witnesslint"
def determine_result(self, run):
if run.exit_code.value == 0:
return result.RESULT_TRUE_PROP
else:
return result.RESULT_FALSE_PROP
|
<commit_before><commit_msg>Add tool info for the witness linter.<commit_after>import benchexec.result as result
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for the witness checker (witnesslint)
(https://github.com/sosy-lab/sv-witnesses)
"""
def executable(self, tool_locator):
return tool_locator.find_executable("witnesslint.py")
def name(self):
return "witnesslint"
def determine_result(self, run):
if run.exit_code.value == 0:
return result.RESULT_TRUE_PROP
else:
return result.RESULT_FALSE_PROP
|
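A hedged smoke test of the tool-info module above; assumes BenchExec is installed so the import path resolves:
# Hypothetical: instantiate the tool info and check its reported name.
from benchexec.tools.witnesslint import Tool

tool = Tool()
assert tool.name() == "witnesslint"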
|
5a4aeb96b72af43aba5a1d7f143214d4083d151f
|
examples/pipeline/fix_space_entities.py
|
examples/pipeline/fix_space_entities.py
|
'''Demonstrate adding a rule-based component that forces some tokens to not
be entities, before the NER tagger is applied. This is used to hotfix the issue
in https://github.com/explosion/spaCy/issues/2870 , present as of spaCy v2.0.16.
'''
import spacy
from spacy.attrs import ENT_IOB
def fix_space_tags(doc):
ent_iobs = doc.to_array([ENT_IOB])
for i, token in enumerate(doc):
if token.is_space:
# Sets 'O' tag (0 is None, so I is 1, O is 2)
ent_iobs[i] = 2
doc.from_array([ENT_IOB], ent_iobs.reshape((len(doc), 1)))
return doc
def main():
nlp = spacy.load('en_core_web_sm')
text = u'''This is some crazy test where I dont need an Apple Watch to make things bug'''
doc = nlp(text)
print('Before', doc.ents)
nlp.add_pipe(fix_space_tags, name='fix-ner', before='ner')
doc = nlp(text)
print('After', doc.ents)
if __name__ == '__main__':
main()
|
Add example showing a fix-up rule for space entities
|
Add example showing a fix-up rule for space entities
|
Python
|
mit
|
explosion/spaCy,explosion/spaCy,recognai/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy,recognai/spaCy,explosion/spaCy,explosion/spaCy,recognai/spaCy,spacy-io/spaCy,explosion/spaCy,honnibal/spaCy,honnibal/spaCy,recognai/spaCy,explosion/spaCy,recognai/spaCy,spacy-io/spaCy,spacy-io/spaCy,honnibal/spaCy,honnibal/spaCy,spacy-io/spaCy
|
Add example showing a fix-up rule for space entities
|
'''Demonstrate adding a rule-based component that forces some tokens to not
be entities, before the NER tagger is applied. This is used to hotfix the issue
in https://github.com/explosion/spaCy/issues/2870 , present as of spaCy v2.0.16.
'''
import spacy
from spacy.attrs import ENT_IOB
def fix_space_tags(doc):
ent_iobs = doc.to_array([ENT_IOB])
for i, token in enumerate(doc):
if token.is_space:
# Sets 'O' tag (0 is None, so I is 1, O is 2)
ent_iobs[i] = 2
doc.from_array([ENT_IOB], ent_iobs.reshape((len(doc), 1)))
return doc
def main():
nlp = spacy.load('en_core_web_sm')
text = u'''This is some crazy test where I dont need an Apple Watch to make things bug'''
doc = nlp(text)
print('Before', doc.ents)
nlp.add_pipe(fix_space_tags, name='fix-ner', before='ner')
doc = nlp(text)
print('After', doc.ents)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example showing a fix-up rule for space entities<commit_after>
|
'''Demonstrate adding a rule-based component that forces some tokens to not
be entities, before the NER tagger is applied. This is used to hotfix the issue
in https://github.com/explosion/spaCy/issues/2870 , present as of spaCy v2.0.16.
'''
import spacy
from spacy.attrs import ENT_IOB
def fix_space_tags(doc):
ent_iobs = doc.to_array([ENT_IOB])
for i, token in enumerate(doc):
if token.is_space:
# Sets 'O' tag (0 is None, so I is 1, O is 2)
ent_iobs[i] = 2
doc.from_array([ENT_IOB], ent_iobs.reshape((len(doc), 1)))
return doc
def main():
nlp = spacy.load('en_core_web_sm')
text = u'''This is some crazy test where I dont need an Apple Watch to make things bug'''
doc = nlp(text)
print('Before', doc.ents)
nlp.add_pipe(fix_space_tags, name='fix-ner', before='ner')
doc = nlp(text)
print('After', doc.ents)
if __name__ == '__main__':
main()
|
Add example showing a fix-up rule for space entities'''Demonstrate adding a rule-based component that forces some tokens to not
be entities, before the NER tagger is applied. This is used to hotfix the issue
in https://github.com/explosion/spaCy/issues/2870 , present as of spaCy v2.0.16.
'''
import spacy
from spacy.attrs import ENT_IOB
def fix_space_tags(doc):
ent_iobs = doc.to_array([ENT_IOB])
for i, token in enumerate(doc):
if token.is_space:
# Sets 'O' tag (0 is None, so I is 1, O is 2)
ent_iobs[i] = 2
doc.from_array([ENT_IOB], ent_iobs.reshape((len(doc), 1)))
return doc
def main():
nlp = spacy.load('en_core_web_sm')
text = u'''This is some crazy test where I dont need an Apple Watch to make things bug'''
doc = nlp(text)
print('Before', doc.ents)
nlp.add_pipe(fix_space_tags, name='fix-ner', before='ner')
doc = nlp(text)
print('After', doc.ents)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example showing a fix-up rule for space entities<commit_after>'''Demonstrate adding a rule-based component that forces some tokens to not
be entities, before the NER tagger is applied. This is used to hotfix the issue
in https://github.com/explosion/spaCy/issues/2870 , present as of spaCy v2.0.16.
'''
import spacy
from spacy.attrs import ENT_IOB
def fix_space_tags(doc):
ent_iobs = doc.to_array([ENT_IOB])
for i, token in enumerate(doc):
if token.is_space:
# Sets 'O' tag (0 is None, so I is 1, O is 2)
ent_iobs[i] = 2
doc.from_array([ENT_IOB], ent_iobs.reshape((len(doc), 1)))
return doc
def main():
nlp = spacy.load('en_core_web_sm')
text = u'''This is some crazy test where I dont need an Apple Watch to make things bug'''
doc = nlp(text)
print('Before', doc.ents)
nlp.add_pipe(fix_space_tags, name='fix-ner', before='ner')
doc = nlp(text)
print('After', doc.ents)
if __name__ == '__main__':
main()
|
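A hedged check that the hotfix lands in the right pipeline slot, reusing fix_space_tags from the record above (requires the en_core_web_sm model):
# Hypothetical: confirm component order after add_pipe(..., before='ner').
import spacy

nlp = spacy.load('en_core_web_sm')
nlp.add_pipe(fix_space_tags, name='fix-ner', before='ner')  # from the record above
print(nlp.pipe_names)  # 'fix-ner' should appear immediately before 'ner'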
|
9c1cb898a6a85d3bf23782ab349fb45224beee6a
|
tempest/tests/services/compute/test_keypairs_client.py
|
tempest/tests/services/compute/test_keypairs_client.py
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslotest import mockpatch
from tempest.services.compute.json import keypairs_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestKeyPairsClient(base.TestCase):
def setUp(self):
super(TestKeyPairsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = keypairs_client.KeyPairsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_keypairs(self, bytes_body=False):
body = '{"keypairs": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_keypairs())
def test_list_keypairs_with_str_body(self):
self._test_list_keypairs()
def test_list_keypairs_with_bytes_body(self):
self._test_list_keypairs(bytes_body=True)
|
Add unit test for method list_keypairs
|
Add unit test for method list_keypairs
This patch adds unit test for keypairs_client.
Specific about method list_keypairs.
Change-Id: I7879280ff4f0211c7ec59c7225183f39978d3d41
|
Python
|
apache-2.0
|
sebrandon1/tempest,rakeshmi/tempest,manasi24/tempest,Tesora/tesora-tempest,openstack/tempest,flyingfish007/tempest,izadorozhna/tempest,rakeshmi/tempest,hayderimran7/tempest,tudorvio/tempest,vedujoshi/tempest,alinbalutoiu/tempest,alinbalutoiu/tempest,flyingfish007/tempest,izadorozhna/tempest,manasi24/tempest,zsoltdudas/lis-tempest,cisco-openstack/tempest,NexusIS/tempest,zsoltdudas/lis-tempest,JioCloud/tempest,xbezdick/tempest,dkalashnik/tempest,varunarya10/tempest,masayukig/tempest,akash1808/tempest,hayderimran7/tempest,pandeyop/tempest,Juniper/tempest,bigswitch/tempest,cisco-openstack/tempest,xbezdick/tempest,nunogt/tempest,tonyli71/tempest,vedujoshi/tempest,pandeyop/tempest,LIS/lis-tempest,masayukig/tempest,manasi24/jiocloud-tempest-qatempest,sebrandon1/tempest,tudorvio/tempest,Juniper/tempest,tonyli71/tempest,nunogt/tempest,bigswitch/tempest,NexusIS/tempest,roopali8/tempest,pczerkas/tempest,akash1808/tempest,varunarya10/tempest,JioCloud/tempest,pczerkas/tempest,dkalashnik/tempest,openstack/tempest,Tesora/tesora-tempest,roopali8/tempest,LIS/lis-tempest,manasi24/jiocloud-tempest-qatempest
|
Add unit test for method list_keypairs
This patch adds unit test for keypairs_client.
Specific about method list_keypairs.
Change-Id: I7879280ff4f0211c7ec59c7225183f39978d3d41
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslotest import mockpatch
from tempest.services.compute.json import keypairs_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestKeyPairsClient(base.TestCase):
def setUp(self):
super(TestKeyPairsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = keypairs_client.KeyPairsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_keypairs(self, bytes_body=False):
body = '{"keypairs": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_keypairs())
def test_list_keypairs_with_str_body(self):
self._test_list_keypairs()
def test_list_keypairs_with_bytes_body(self):
self._test_list_keypairs(bytes_body=True)
|
<commit_before><commit_msg>Add unit test for method list_keypairs
This patch adds unit test for keypairs_client.
Specific about method list_keypairs.
Change-Id: I7879280ff4f0211c7ec59c7225183f39978d3d41<commit_after>
|
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslotest import mockpatch
from tempest.services.compute.json import keypairs_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestKeyPairsClient(base.TestCase):
def setUp(self):
super(TestKeyPairsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = keypairs_client.KeyPairsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_keypairs(self, bytes_body=False):
body = '{"keypairs": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_keypairs())
def test_list_keypairs_with_str_body(self):
self._test_list_keypairs()
def test_list_keypairs_with_bytes_body(self):
self._test_list_keypairs(bytes_body=True)
|
Add unit test for method list_keypairs
This patch adds unit test for keypairs_client.
Specific about method list_keypairs.
Change-Id: I7879280ff4f0211c7ec59c7225183f39978d3d41# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslotest import mockpatch
from tempest.services.compute.json import keypairs_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestKeyPairsClient(base.TestCase):
def setUp(self):
super(TestKeyPairsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = keypairs_client.KeyPairsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_keypairs(self, bytes_body=False):
body = '{"keypairs": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_keypairs())
def test_list_keypairs_with_str_body(self):
self._test_list_keypairs()
def test_list_keypairs_with_bytes_body(self):
self._test_list_keypairs(bytes_body=True)
|
<commit_before><commit_msg>Add unit test for method list_keypairs
This patch adds a unit test for keypairs_client.
Specifically, it covers the method list_keypairs.
Change-Id: I7879280ff4f0211c7ec59c7225183f39978d3d41<commit_after># Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
from oslotest import mockpatch
from tempest.services.compute.json import keypairs_client
from tempest.tests import base
from tempest.tests import fake_auth_provider
class TestKeyPairsClient(base.TestCase):
def setUp(self):
super(TestKeyPairsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = keypairs_client.KeyPairsClient(
fake_auth, 'compute', 'regionOne')
def _test_list_keypairs(self, bytes_body=False):
body = '{"keypairs": []}'
if bytes_body:
body = body.encode('utf-8')
expected = []
response = (httplib2.Response({'status': 200}), body)
self.useFixture(mockpatch.Patch(
'tempest.common.service_client.ServiceClient.get',
return_value=response))
self.assertEqual(expected, self.client.list_keypairs())
def test_list_keypairs_with_str_body(self):
self._test_list_keypairs()
def test_list_keypairs_with_bytes_body(self):
self._test_list_keypairs(bytes_body=True)
|
|
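The pattern in the record above, running each assertion once with a str body and once with a bytes body, generalizes beyond tempest. Below is a minimal stdlib-only sketch of the same idea; TinyClient and its _get hook are hypothetical stand-ins, not tempest APIs.

import json
import unittest
from unittest import mock

class TinyClient:
    """Hypothetical stand-in for a service client with a mockable transport."""
    def list_keypairs(self):
        status, body = self._get("/keypairs")   # transport call, patched in tests
        if isinstance(body, bytes):             # tolerate both body types
            body = body.decode("utf-8")
        return json.loads(body)["keypairs"]

    def _get(self, path):
        raise NotImplementedError

class TinyClientTest(unittest.TestCase):
    def _check_list_keypairs(self, body):
        with mock.patch.object(TinyClient, "_get", return_value=(200, body)):
            self.assertEqual([], TinyClient().list_keypairs())

    def test_list_keypairs_with_str_body(self):
        self._check_list_keypairs('{"keypairs": []}')

    def test_list_keypairs_with_bytes_body(self):
        self._check_list_keypairs(b'{"keypairs": []}')

if __name__ == "__main__":
    unittest.main()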
faa917acfa0257aeb508ac6ea6393861f955945e
|
zou/app/utils/emails.py
|
zou/app/utils/emails.py
|
from flask_mail import Message
from zou.app import mail
def send_email(subject, body, recipient_email):
"""
Send an email with given subject and body to given recipient.
"""
message = Message(
body=body,
subject=subject,
recipients=[recipient_email]
)
mail.send(message)
|
Add an email helpers library
|
Add an email helpers library
|
Python
|
agpl-3.0
|
cgwire/zou
|
Add an email helpers library
|
from flask_mail import Message
from zou.app import mail
def send_email(subject, body, recipient_email):
"""
Send an email with given subject and body to given recipient.
"""
message = Message(
body=body,
subject=subject,
recipients=[recipient_email]
)
mail.send(message)
|
<commit_before><commit_msg>Add an email helpers library<commit_after>
|
from flask_mail import Message
from zou.app import mail
def send_email(subject, body, recipient_email):
"""
Send an email with given subject and body to given recipient.
"""
message = Message(
body=body,
subject=subject,
recipients=[recipient_email]
)
mail.send(message)
|
Add an email helpers libraryfrom flask_mail import Message
from zou.app import mail
def send_email(subject, body, recipient_email):
"""
Send an email with given subject and body to given recipient.
"""
message = Message(
body=body,
subject=subject,
recipients=[recipient_email]
)
mail.send(message)
|
<commit_before><commit_msg>Add an email helpers library<commit_after>from flask_mail import Message
from zou.app import mail
def send_email(subject, body, recipient_email):
"""
Send an email with given subject and body to given recipient.
"""
message = Message(
body=body,
subject=subject,
recipients=[recipient_email]
)
mail.send(message)
|
|
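For context, a hedged sketch of how a helper like the one above is typically wired up. It assumes Flask and Flask-Mail are installed and an SMTP relay is reachable on localhost (both assumptions); zou's real app configuration is not shown in this record.

from flask import Flask
from flask_mail import Mail, Message

app = Flask(__name__)
app.config["MAIL_SERVER"] = "localhost"                    # assumption: local relay
app.config["MAIL_DEFAULT_SENDER"] = "noreply@example.com"  # assumption
mail = Mail(app)

def send_email(subject, body, recipient_email):
    message = Message(subject=subject, body=body, recipients=[recipient_email])
    with app.app_context():   # Flask-Mail sends inside an application context
        mail.send(message)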
9244a246b5501ae984863dd442f5b2ab10bc437b
|
lxc-stress.py
|
lxc-stress.py
|
#!/usr/bin/env python
import time
NUM_RUNS = 10
def stress_cpu(num_passes=20000):
sum(1/16**k *
(4/(8*k+1) -
2/(8*k+4) -
1/(8*k+5) -
1/(8*k+6)) for k in xrange(num_passes))
start_time = time.time()
for i in xrange(0, NUM_RUNS):
print('Running stress test {}/{}'.format(i + 1, NUM_RUNS))
stress_cpu()
end_time = time.time()
delta_time = end_time - start_time
avg_time = delta_time / float(NUM_RUNS)
print('Average run time (s): {}'.format(avg_time))
|
Add basic script for stress testing a remote server's CPU
|
Add basic script for stress testing a remote server's CPU
|
Python
|
mit
|
ben-rowan/lxc-calibrate
|
Add basic script for stress testing a remote server's CPU
|
#!/usr/bin/env python
import time
NUM_RUNS = 10
def stress_cpu(num_passes=20000):
sum(1/16**k *
(4/(8*k+1) -
2/(8*k+4) -
1/(8*k+5) -
1/(8*k+6)) for k in xrange(num_passes))
start_time = time.time()
for i in xrange(0, NUM_RUNS):
print('Running stress test {}/{}'.format(i + 1, NUM_RUNS))
stress_cpu()
end_time = time.time()
delta_time = end_time - start_time
avg_time = delta_time / float(NUM_RUNS)
print('Average run time (s): {}'.format(avg_time))
|
<commit_before><commit_msg>Add basic script for stress testing a remote server's CPU<commit_after>
|
#!/usr/bin/env python
import time
NUM_RUNS = 10
def stress_cpu(num_passes=20000):
sum(1/16**k *
(4/(8*k+1) -
2/(8*k+4) -
1/(8*k+5) -
1/(8*k+6)) for k in xrange(num_passes))
start_time = time.time()
for i in xrange(0, NUM_RUNS):
print('Running stress test {}/{}'.format(i + 1, NUM_RUNS))
stress_cpu()
end_time = time.time()
delta_time = end_time - start_time
avg_time = delta_time / float(NUM_RUNS)
print('Average run time (s): {}'.format(avg_time))
|
Add basic script for stress testing a remote server's CPU#!/usr/bin/env python
import time
NUM_RUNS = 10
def stress_cpu(num_passes=20000):
sum(1/16**k *
(4/(8*k+1) -
2/(8*k+4) -
1/(8*k+5) -
1/(8*k+6)) for k in xrange(num_passes))
start_time = time.time()
for i in xrange(0, NUM_RUNS):
print('Running stress test {}/{}'.format(i + 1, NUM_RUNS))
stress_cpu()
end_time = time.time()
delta_time = end_time - start_time
avg_time = delta_time / float(NUM_RUNS)
print('Average run time (s): {}'.format(avg_time))
|
<commit_before><commit_msg>Add basic script for stress testing a remote server's CPU<commit_after>#!/usr/bin/env python
import time
NUM_RUNS = 10
def stress_cpu(num_passes=20000):
sum(1/16**k *
(4/(8*k+1) -
2/(8*k+4) -
1/(8*k+5) -
1/(8*k+6)) for k in xrange(num_passes))
start_time = time.time()
for i in xrange(0, NUM_RUNS):
print('Running stress test {}/{}'.format(i + 1, NUM_RUNS))
stress_cpu()
end_time = time.time()
delta_time = end_time - start_time
avg_time = delta_time / float(NUM_RUNS)
print('Average run time (s): {}'.format(avg_time))
|
|
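One caveat on the script above: under Python 2, which it targets given xrange, 1/16**k and the other divisions are integer divisions, so every summand with k >= 1 is 0; the loop still burns CPU but does not actually evaluate the series. A Python 3 sketch with true division:

import time

NUM_RUNS = 10

def stress_cpu(num_passes=20000):
    # Partial sums of the Bailey-Borwein-Plouffe series; converges to pi.
    return sum(1 / 16 ** k *
               (4 / (8 * k + 1) - 2 / (8 * k + 4) -
                1 / (8 * k + 5) - 1 / (8 * k + 6))
               for k in range(num_passes))

start_time = time.time()
for i in range(NUM_RUNS):
    stress_cpu()
avg_time = (time.time() - start_time) / NUM_RUNS
print('Average run time (s): {}'.format(avg_time))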
498bfbbde87fd94fde0775ab7d366c972b09c1d0
|
challenges/232-Palindromes.py
|
challenges/232-Palindromes.py
|
import re
def isPalindrome(file):
infile = open(file, 'r')
lines = infile.readlines()
num_lines = int(lines[0])
total_string = ""
for line in lines[1: (num_lines+1)]:
total_string += line
total_string = re.sub('[^a-zA-Z0-9]', '', total_string)
total_string = total_string.lower()
if(total_string == total_string[::-1]):
print('Palindrome')
else:
print('Not a palindrome')
isPalindrome('../resources/long_palindrome.txt')
|
Add Challenge 232 Palindrome Solution
|
Add Challenge 232 Palindrome Solution
|
Python
|
mit
|
JonShepChen/DailyProgrammerChallenges
|
Add Challenge 232 Palindrome Solution
|
import re
def isPalindrome(file):
infile = open(file, 'r')
lines = infile.readlines()
num_lines = int(lines[0])
total_string = ""
for line in lines[1: (num_lines+1)]:
total_string += line
total_string = re.sub('[^a-zA-Z0-9]', '', total_string)
total_string = total_string.lower()
if(total_string == total_string[::-1]):
print('Palindrome')
else:
print('Not a palindrome')
isPalindrome('../resources/long_palindrome.txt')
|
<commit_before><commit_msg>Add Challenge 232 Palindrome Solution<commit_after>
|
import re
def isPalindrome(file):
infile = open(file, 'r')
lines = infile.readlines()
num_lines = int(lines[0])
total_string = ""
for line in lines[1: (num_lines+1)]:
total_string += line
total_string = re.sub('[^a-zA-Z0-9]', '', total_string)
total_string = total_string.lower()
if(total_string == total_string[::-1]):
print('Palindrome')
else:
print('Not a palindrome')
isPalindrome('../resources/long_palindrome.txt')
|
Add Challenge 232 Palindrome Solutionimport re
def isPalindrome(file):
infile = open(file, 'r')
lines = infile.readlines()
num_lines = int(lines[0])
total_string = ""
for line in lines[1: (num_lines+1)]:
total_string += line
total_string = re.sub('[^a-zA-Z0-9]', '', total_string)
total_string = total_string.lower()
if(total_string == total_string[::-1]):
print('Palindrome')
else:
print('Not a palindrome')
isPalindrome('../resources/long_palindrome.txt')
|
<commit_before><commit_msg>Add Challenge 232 Palindrome Solution<commit_after>import re
def isPalindrome(file):
infile = open(file, 'r')
lines = infile.readlines()
num_lines = int(lines[0])
total_string = ""
for line in lines[1: (num_lines+1)]:
total_string += line
total_string = re.sub('[^a-zA-Z0-9]', '', total_string)
total_string = total_string.lower()
if(total_string == total_string[::-1]):
print('Palindrome')
else:
print('Not a palindrome')
isPalindrome('../resources/long_palindrome.txt')
|
|
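The same check reads a little safer with a context manager, so the file handle is closed deterministically. A sketch; the path and the convention that the first line holds the line count are taken from the solution above.

import re

def is_palindrome(path):
    with open(path) as infile:   # closed automatically on exit
        lines = infile.readlines()
    num_lines = int(lines[0])
    text = re.sub('[^a-zA-Z0-9]', '', ''.join(lines[1:num_lines + 1])).lower()
    return text == text[::-1]

# print('Palindrome' if is_palindrome('../resources/long_palindrome.txt')
#       else 'Not a palindrome')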
f5c46ea946f487d6ff445020bac55bd7b137088b
|
test/widgets/test_wttr.py
|
test/widgets/test_wttr.py
|
# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile import widget
RESPONSE = "London: +17°C"
def test_wttr_methods():
wttr = widget.Wttr(location={"London": "Home"})
assert wttr._get_url() == "https://wttr.in/London?m&format=3&lang=en"
assert wttr.parse(RESPONSE) == "Home: +17°C"
def test_wttr_no_location():
wttr = widget.Wttr()
assert wttr._get_url() is None
|
Add test for wttr widget
|
Add test for wttr widget
|
Python
|
mit
|
qtile/qtile,qtile/qtile,ramnes/qtile,ramnes/qtile
|
Add test for wttr widget
|
# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile import widget
RESPONSE = "London: +17°C"
def test_wttr_methods():
wttr = widget.Wttr(location={"London": "Home"})
assert wttr._get_url() == "https://wttr.in/London?m&format=3&lang=en"
assert wttr.parse(RESPONSE) == "Home: +17°C"
def test_wttr_no_location():
wttr = widget.Wttr()
assert wttr._get_url() is None
|
<commit_before><commit_msg>Add test for wttr widget<commit_after>
|
# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile import widget
RESPONSE = "London: +17°C"
def test_wttr_methods():
wttr = widget.Wttr(location={"London": "Home"})
assert wttr._get_url() == "https://wttr.in/London?m&format=3&lang=en"
assert wttr.parse(RESPONSE) == "Home: +17°C"
def test_wttr_no_location():
wttr = widget.Wttr()
assert wttr._get_url() is None
|
Add test for wttr widget# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile import widget
RESPONSE = "London: +17°C"
def test_wttr_methods():
wttr = widget.Wttr(location={"London": "Home"})
assert wttr._get_url() == "https://wttr.in/London?m&format=3&lang=en"
assert wttr.parse(RESPONSE) == "Home: +17°C"
def test_wttr_no_location():
wttr = widget.Wttr()
assert wttr._get_url() is None
|
<commit_before><commit_msg>Add test for wttr widget<commit_after># Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile import widget
RESPONSE = "London: +17°C"
def test_wttr_methods():
wttr = widget.Wttr(location={"London": "Home"})
assert wttr._get_url() == "https://wttr.in/London?m&format=3&lang=en"
assert wttr.parse(RESPONSE) == "Home: +17°C"
def test_wttr_no_location():
wttr = widget.Wttr()
assert wttr._get_url() is None
|
|
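What parse is asserted to do in the test above is a plain label substitution: wttr.in echoes the queried location, and the widget swaps in the user's display name. A standalone sketch of just that step, not the libqtile implementation:

def parse(response, location):
    # Replace each queried location with its configured display label.
    for query_name, label in location.items():
        response = response.replace(query_name, label)
    return response

assert parse("London: +17°C", {"London": "Home"}) == "Home: +17°C"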
71bed1a9a7a0a042d3e0964f06cee900f6d7d9ae
|
stmhal/boards/STM32F4DISC/staccel.py
|
stmhal/boards/STM32F4DISC/staccel.py
|
"""
Driver for accelerometer on STM32F4 Discovery board.
Assumes it's a LIS302DL MEMS device.
Not currently working.
See:
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/Components/lis302dl/lis302dl.h
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/Components/lis302dl/lis302dl.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery.h
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery_accelerometer.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery_accelerometer.h
STM32Cube_FW_F4_V1.1.0/Projects/STM32F4-Discovery/Demonstrations/Src/main.c
"""
from pyb import Pin
from pyb import SPI
READWRITE_CMD = const(0x80)
MULTIPLEBYTE_CMD = const(0x40)
LIS302DL_WHO_AM_I_ADDR = const(0x0f)
LIS302DL_CTRL_REG1_ADDR = const(0x20)
class STAccel:
def __init__(self):
self.cs_pin = Pin('PE3', Pin.OUT_PP, Pin.PULL_NONE)
self.cs_pin.high()
self.spi = SPI(1, SPI.MASTER, baudrate=328125, polarity=0, phase=1, bits=8)
def rd(self, addr, nbytes):
if nbytes > 1:
addr |= READWRITE_CMD | MULTIPLEBYTE_CMD
else:
addr |= READWRITE_CMD
self.cs_pin.low()
self.spi.send(addr)
buf = self.spi.send_recv(bytearray(nbytes * [0])) # read data, MSB first
self.cs_pin.high()
return buf
def wr(self, addr, buf):
if len(buf) > 1:
addr |= MULTIPLEBYTE_CMD
self.cs_pin.low()
self.spi.send(addr)
for b in buf:
self.spi.send(b)
self.cs_pin.high()
def read_id(self):
return self.rd(LIS302DL_WHO_AM_I_ADDR, 1)
def init(self, init_param):
self.wr(LIS302DL_CTRL_REG1_ADDR, bytearray([init_param]))
|
Add preliminary driver for STM32F4DISC accelerometer.
|
stmhal: Add preliminary driver for STM32F4DISC accelerometer.
Written in Python, not currently working. See issue #725.
|
Python
|
mit
|
torwag/micropython,emfcamp/micropython,jlillest/micropython,PappaPeppar/micropython,rubencabrera/micropython,tralamazza/micropython,Peetz0r/micropython-esp32,SHA2017-badge/micropython-esp32,ruffy91/micropython,SungEun-Steve-Kim/test-mp,infinnovation/micropython,supergis/micropython,xhat/micropython,emfcamp/micropython,TDAbboud/micropython,ahotam/micropython,AriZuu/micropython,HenrikSolver/micropython,blazewicz/micropython,lbattraw/micropython,kerneltask/micropython,ruffy91/micropython,adafruit/micropython,torwag/micropython,stonegithubs/micropython,trezor/micropython,jlillest/micropython,PappaPeppar/micropython,micropython/micropython-esp32,blazewicz/micropython,omtinez/micropython,pramasoul/micropython,ericsnowcurrently/micropython,dhylands/micropython,mianos/micropython,vriera/micropython,xhat/micropython,ChuckM/micropython,pozetroninc/micropython,pozetroninc/micropython,methoxid/micropystat,heisewangluo/micropython,utopiaprince/micropython,aethaniel/micropython,cnoviello/micropython,warner83/micropython,jmarcelino/pycom-micropython,hosaka/micropython,AriZuu/micropython,cloudformdesign/micropython,MrSurly/micropython,pramasoul/micropython,mhoffma/micropython,omtinez/micropython,Timmenem/micropython,blazewicz/micropython,hosaka/micropython,oopy/micropython,tdautc19841202/micropython,dmazzella/micropython,turbinenreiter/micropython,dhylands/micropython,xyb/micropython,methoxid/micropystat,matthewelse/micropython,lowRISC/micropython,mianos/micropython,lowRISC/micropython,ericsnowcurrently/micropython,ceramos/micropython,rubencabrera/micropython,xuxiaoxin/micropython,ryannathans/micropython,alex-robbins/micropython,bvernoux/micropython,xhat/micropython,xuxiaoxin/micropython,tdautc19841202/micropython,cwyark/micropython,slzatz/micropython,stonegithubs/micropython,alex-march/micropython,swegener/micropython,ericsnowcurrently/micropython,AriZuu/micropython,mianos/micropython,noahchense/micropython,dmazzella/micropython,puuu/micropython,blmorris/micropython,ernesto-g/micropython,adafruit/circuitpython,praemdonck/micropython,xyb/micropython,slzatz/micropython,redbear/micropython,puuu/micropython,vriera/micropython,tdautc19841202/micropython,ericsnowcurrently/micropython,omtinez/micropython,hiway/micropython,deshipu/micropython,aethaniel/micropython,misterdanb/micropython,alex-robbins/micropython,hosaka/micropython,jmarcelino/pycom-micropython,bvernoux/micropython,HenrikSolver/micropython,paul-xxx/micropython,suda/micropython,emfcamp/micropython,feilongfl/micropython,ganshun666/micropython,ruffy91/micropython,xuxiaoxin/micropython,turbinenreiter/micropython,henriknelson/micropython,henriknelson/micropython,oopy/micropython,MrSurly/micropython,pramasoul/micropython,danicampora/micropython,
matthewelse/micropython,EcmaXp/micropython,Peetz0r/micropython-esp32,firstval/micropython,ahotam/micropython,mhoffma/micropython,slzatz/micropython,emfcamp/micropython,alex-robbins/micropython,skybird6672/micropython,cwyark/micropython,methoxid/micropystat,adafruit/micropython,micropython/micropython-esp32,mpalomer/micropython,kostyll/micropython,pfalcon/micropython,orionrobots/micropython,mgyenik/micropython,torwag/micropython,vriera/micropython,ryannathans/micropython,dinau/micropython,tralamazza/micropython,firstval/micropython,henriknelson/micropython,mhoffma/micropython,suda/micropython,dxxb/micropython,ernesto-g/micropython,heisewangluo/micropython,jimkmc/micropython,dmazzella/micropython,misterdanb/micropython,galenhz/micropython,orionrobots/micropython,paul-xxx/micropython,chrisdearman/micropython,kerneltask/micropython,ganshun666/micropython,xhat/micropython,cloudformdesign/micropython,jimkmc/micropython,SungEun-Steve-Kim/test-mp,adamkh/micropython,tuc-osg/micropython,adafruit/micropython,martinribelotta/micropython,tralamazza/micropython,ChuckM/micropython,KISSMonX/micropython,bvernoux/micropython,utopiaprince/micropython,noahwilliamsson/micropython,supergis/micropython,lowRISC/micropython,deshipu/micropython,MrSurly/micropython,oopy/micropython,noahchense/micropython,turbinenreiter/micropython,ruffy91/micropython,pfalcon/micropython,ChuckM/micropython,ernesto-g/micropython,firstval/micropython,micropython/micropython-esp32,stonegithubs/micropython,matthewelse/micropython,SungEun-Steve-Kim/test-mp,infinnovation/micropython,jmarcelino/pycom-micropython,adamkh/micropython,lbattraw/micropython,tobbad/micropython,aethaniel/micropython,tobbad/micropython,tuc-osg/micropython,stonegithubs/micropython,adafruit/circuitpython,pramasoul/micropython,blazewicz/micropython,pfalcon/micropython,Timmenem/micropython,swegener/micropython,drrk/micropython,cwyark/micropython,cloudformdesign/micropython,galenhz/micropython,tdautc19841202/micropython,deshipu/micropython,KISSMonX/micropython,xyb/micropython,suda/micropython,KISSMonX/micropython,alex-robbins/micropython,redbear/micropython,vitiral/micropython,SungEun-Steve-Kim/test-mp,MrSurly/micropython,TDAbboud/micropython,warner83/micropython,jimkmc/micropython,adafruit/micropython,jlillest/micropython,slzatz/micropython,utopiaprince/micropython,ganshun666/micropython,ahotam/micropython,lowRISC/micropython,redbear/micropython,noahwilliamsson/micropython,torwag/micropython,tuc-osg/micropython,firstval/micropython,EcmaXp/micropython,Peetz0r/micropython-esp32,mgyenik/micropython,dinau/micropython,toolmacher/micropython,adafruit/circuitpython,ganshun666/micropython,trezor/micropython,feilongfl/micropython,praemdonck/micropython,mpalomer/micropython,stonegithubs/micropython,feilongfl/micropython,SHA2017-badge/micropython-esp32,
utopiaprince/micropython,skybird6672/micropython,vitiral/micropython,skybird6672/micropython,ceramos/micropython,turbinenreiter/micropython,PappaPeppar/micropython,martinribelotta/micropython,jimkmc/micropython,paul-xxx/micropython,PappaPeppar/micropython,tuc-osg/micropython,aethaniel/micropython,HenrikSolver/micropython,alex-march/micropython,toolmacher/micropython,oopy/micropython,vriera/micropython,xuxiaoxin/micropython,tdautc19841202/micropython,cwyark/micropython,slzatz/micropython,stonegithubs/micropython,alex-march/micropython,swegener/micropython,ericsnowcurrently/micropython,AriZuu/micropython,mianos/micropython,noahchense/micropython,dmazzella/micropython,puuu/micropython,blmorris/micropython,ernesto-g/micropython,adafruit/circuitpython,praemdonck/micropython,xyb/micropython,slzatz/micropython,redbear/micropython,puuu/micropython,vriera/micropython,tdautc19841202/micropython,ericsnowcurrently/micropython,omtinez/micropython,hiway/micropython,deshipu/micropython,aethaniel/micropython,misterdanb/micropython,alex-robbins/micropython,hosaka/micropython,jmarcelino/pycom-micropython,bvernoux/micropython,HenrikSolver/micropython,paul-xxx/micropython,suda/micropython,emfcamp/micropython,feilongfl/micropython,ganshun666/micropython,ruffy91/micropython,xuxiaoxin/micropython,turbinenreiter/micropython,henriknelson/micropython,henriknelson/micropython,oopy/micropython,MrSurly/micropython,pramasoul/micropython,danicampora/micropython,alex-robbins/micropython,dxxb/micropython,orionrobots/micropython,blmorris/micropython,aethaniel/micropython,tuc-osg/micropython,dhylands/micropython,martinribelotta/micropython,methoxid/micropystat,supergis/micropython,TDAbboud/micropython,HenrikSolver/micropython,pozetroninc/micropython,hiway/micropython,kostyll/micropython,danicampora/micropython,deshipu/micropython,ruffy91/micropython,dhylands/micropython,xyb/micropython,PappaPeppar/micropython,rubencabrera/micropython,neilh10/micropython,blmorris/micropython,xyb/micropython,ahotam/micropython,micropython/micropython-esp32,tobbad/micropython,kerneltask/micropython,matthewelse/micropython,tobbad/micropython,deshipu/micropython,firstval/micropython,infinnovation/micropython,ChuckM/micropython,Vogtinator/micropython,dmazzella/micropython,mgyenik/micropython,ceramos/micropython,noahchense/micropython,dinau/micropython,tobbad/micropython,rubencabrera/micropython,swegener/micropython,oopy/micropython,warner83/micropython,danicampora/micropython,SungEun-Steve-Kim/test-mp,redbear/micropython,KISSMonX/micropython,skybird6672/micropython,cnoviello/micropython,Timmenem/micropython,praemdonck/micropython,vriera/micropython,misterdanb/micropython,selste/micropython,cnoviello/micropython,paul-xxx/micropython,jlillest/micropython,pfalcon/micropython,ahotam/micropython,pramasoul/micropython,emfcamp/micropython,ericsnowcurrently/micropython,pozetroninc/micropython,xuxiaoxin/micropython,matthewelse/micropython,ceramos/micropython,drrk/micropython,Timmenem/micropython,dxxb/micropython,puuu/micropython,mgyenik/micropython,jimkmc/micropython,vitiral/micropython,mianos/micropython,neilh10/micropython,Vogtinator/micropython,blmorris/micropython,drrk/micropython,hosaka/micropython,warner83/micropython,slzatz/micropython,martinribelotta/micropython,chrisdearman/micropython,infinnovation/micropython,kerneltask/micropython,selste/micropython,trezor/micropython,xhat/micropython,xhat/micropython,noahchense/micropython,Vogtinator/micropython,PappaPeppar/micropython,jmarcelino/pycom-micropython,adamkh/micropython,hiway/micropython,noahwilliamsson/micropython,
praemdonck/micropython,puuu/micropython,mhoffma/micropython,TDAbboud/micropython,EcmaXp/micropython,swegener/micropython,galenhz/micropython,neilh10/micropython,ernesto-g/micropython,alex-march/micropython,puuu/micropython,blazewicz/micropython,orionrobots/micropython,drrk/micropython,ceramos/micropython,alex-march/micropython,heisewangluo/micropython,SHA2017-badge/micropython-esp32,cloudformdesign/micropython,neilh10/micropython,blmorris/micropython,trezor/micropython
|
stmhal: Add preliminary driver for STM32F4DISC accelerometer.
Written in Python, not currently working. See issue #725.
|
"""
Driver for accelerometer on STM32F4 Discovery board.
Assumes it's a LIS302DL MEMS device.
Not currently working.
See:
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/Components/lis302dl/lis302dl.h
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/Components/lis302dl/lis302dl.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery.h
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery_accelerometer.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery_accelerometer.h
STM32Cube_FW_F4_V1.1.0/Projects/STM32F4-Discovery/Demonstrations/Src/main.c
"""
from pyb import Pin
from pyb import SPI
READWRITE_CMD = const(0x80)
MULTIPLEBYTE_CMD = const(0x40)
LIS302DL_WHO_AM_I_ADDR = const(0x0f)
LIS302DL_CTRL_REG1_ADDR = const(0x20)
class STAccel:
def __init__(self):
self.cs_pin = Pin('PE3', Pin.OUT_PP, Pin.PULL_NONE)
self.cs_pin.high()
self.spi = SPI(1, SPI.MASTER, baudrate=328125, polarity=0, phase=1, bits=8)
def rd(self, addr, nbytes):
if nbytes > 1:
addr |= READWRITE_CMD | MULTIPLEBYTE_CMD
else:
addr |= READWRITE_CMD
self.cs_pin.low()
self.spi.send(addr)
buf = self.spi.send_recv(bytearray(nbytes * [0])) # read data, MSB first
self.cs_pin.high()
return buf
def wr(self, addr, buf):
if len(buf) > 1:
addr |= MULTIPLEBYTE_CMD
self.cs_pin.low()
self.spi.send(addr)
for b in buf:
self.spi.send(b)
self.cs_pin.high()
def read_id(self):
return self.rd(LIS302DL_WHO_AM_I_ADDR, 1)
def init(self, init_param):
self.wr(LIS302DL_CTRL_REG1_ADDR, bytearray([init_param]))
|
<commit_before><commit_msg>stmhal: Add preliminary driver for STM32F4DISC accelerometer.
Written in Python, not currently working. See issue #725.<commit_after>
|
"""
Driver for accelerometer on STM32F4 Discovery board.
Assumes it's a LIS302DL MEMS device.
Not currently working.
See:
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/Components/lis302dl/lis302dl.h
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/Components/lis302dl/lis302dl.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery.h
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery_accelerometer.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery_accelerometer.h
STM32Cube_FW_F4_V1.1.0/Projects/STM32F4-Discovery/Demonstrations/Src/main.c
"""
from pyb import Pin
from pyb import SPI
READWRITE_CMD = const(0x80)
MULTIPLEBYTE_CMD = const(0x40)
LIS302DL_WHO_AM_I_ADDR = const(0x0f)
LIS302DL_CTRL_REG1_ADDR = const(0x20)
class STAccel:
def __init__(self):
self.cs_pin = Pin('PE3', Pin.OUT_PP, Pin.PULL_NONE)
self.cs_pin.high()
self.spi = SPI(1, SPI.MASTER, baudrate=328125, polarity=0, phase=1, bits=8)
def rd(self, addr, nbytes):
if nbytes > 1:
addr |= READWRITE_CMD | MULTIPLEBYTE_CMD
else:
addr |= READWRITE_CMD
self.cs_pin.low()
self.spi.send(addr)
buf = self.spi.send_recv(bytearray(nbytes * [0])) # read data, MSB first
self.cs_pin.high()
return buf
def wr(self, addr, buf):
if len(buf) > 1:
addr |= MULTIPLEBYTE_CMD
self.cs_pin.low()
self.spi.send(addr)
for b in buf:
self.spi.send(b)
self.cs_pin.high()
def read_id(self):
return self.rd(LIS302DL_WHO_AM_I_ADDR, 1)
def init(self, init_param):
self.wr(LIS302DL_CTRL_REG1_ADDR, bytearray([init_param]))
|
stmhal: Add preliminary driver for STM32F4DISC accelerometer.
Written in Python, not currently working. See issue #725."""
Driver for accelerometer on STM32F4 Discovery board.
Assumes it's a LIS302DL MEMS device.
Not currently working.
See:
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/Components/lis302dl/lis302dl.h
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/Components/lis302dl/lis302dl.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery.h
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery_accelerometer.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery_accelerometer.h
STM32Cube_FW_F4_V1.1.0/Projects/STM32F4-Discovery/Demonstrations/Src/main.c
"""
from pyb import Pin
from pyb import SPI
READWRITE_CMD = const(0x80)
MULTIPLEBYTE_CMD = const(0x40)
LIS302DL_WHO_AM_I_ADDR = const(0x0f)
LIS302DL_CTRL_REG1_ADDR = const(0x20)
class STAccel:
def __init__(self):
self.cs_pin = Pin('PE3', Pin.OUT_PP, Pin.PULL_NONE)
self.cs_pin.high()
self.spi = SPI(1, SPI.MASTER, baudrate=328125, polarity=0, phase=1, bits=8)
def rd(self, addr, nbytes):
if nbytes > 1:
addr |= READWRITE_CMD | MULTIPLEBYTE_CMD
else:
addr |= READWRITE_CMD
self.cs_pin.low()
self.spi.send(addr)
buf = self.spi.send_recv(bytearray(nbytes * [0])) # read data, MSB first
self.cs_pin.high()
return buf
def wr(self, addr, buf):
if len(buf) > 1:
addr |= MULTIPLEBYTE_CMD
self.cs_pin.low()
self.spi.send(addr)
for b in buf:
self.spi.send(b)
self.cs_pin.high()
def read_id(self):
return self.rd(LIS302DL_WHO_AM_I_ADDR, 1)
def init(self, init_param):
self.wr(LIS302DL_CTRL_REG1_ADDR, bytearray([init_param]))
|
<commit_before><commit_msg>stmhal: Add preliminary driver for STM32F4DISC accelerometer.
Written in Python, not currently working. See issue #725.<commit_after>"""
Driver for accelerometer on STM32F4 Discovery board.
Assumes it's a LIS302DL MEMS device.
Not currently working.
See:
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/Components/lis302dl/lis302dl.h
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/Components/lis302dl/lis302dl.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery.h
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery_accelerometer.c
STM32Cube_FW_F4_V1.1.0/Drivers/BSP/STM32F4-Discovery/stm32f4_discovery_accelerometer.h
STM32Cube_FW_F4_V1.1.0/Projects/STM32F4-Discovery/Demonstrations/Src/main.c
"""
from pyb import Pin
from pyb import SPI
READWRITE_CMD = const(0x80)
MULTIPLEBYTE_CMD = const(0x40)
LIS302DL_WHO_AM_I_ADDR = const(0x0f)
LIS302DL_CTRL_REG1_ADDR = const(0x20)
class STAccel:
def __init__(self):
self.cs_pin = Pin('PE3', Pin.OUT_PP, Pin.PULL_NONE)
self.cs_pin.high()
self.spi = SPI(1, SPI.MASTER, baudrate=328125, polarity=0, phase=1, bits=8)
def rd(self, addr, nbytes):
if nbytes > 1:
addr |= READWRITE_CMD | MULTIPLEBYTE_CMD
else:
addr |= READWRITE_CMD
self.cs_pin.low()
self.spi.send(addr)
buf = self.spi.send_recv(bytearray(nbytes * [0])) # read data, MSB first
self.cs_pin.high()
return buf
def wr(self, addr, buf):
if len(buf) > 1:
addr |= MULTIPLEBYTE_CMD
self.cs_pin.low()
self.spi.send(addr)
for b in buf:
self.spi.send(b)
self.cs_pin.high()
def read_id(self):
return self.rd(LIS302DL_WHO_AM_I_ADDR, 1)
def init(self, init_param):
self.wr(LIS302DL_CTRL_REG1_ADDR, bytearray([init_param]))
|
|
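A hypothetical smoke test for this driver, on the Discovery board itself rather than a PC. The register values are assumptions taken from the LIS302DL datasheet, not from this commit: WHO_AM_I reads back 0x3b, and CTRL_REG1 = 0x47 means power on, 100 Hz data rate, X/Y/Z axes enabled.

# Runs under MicroPython on the board; STAccel is the class defined above.
acc = STAccel()
if acc.read_id()[0] == 0x3b:      # LIS302DL identifies itself as 0x3b
    acc.init(0x47)                # datasheet: power on, 100 Hz, X/Y/Z enabled
    raw = acc.rd(0x29, 5)         # OUT_X/OUT_Y/OUT_Z sit at 0x29/0x2b/0x2d
    x, y, z = raw[0], raw[2], raw[4]
    print(x, y, z)                # raw two's-complement acceleration bytes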
b596be1a82a9024471b4afafc8922292c300f1e9
|
systemvm/patches/debian/config/opt/cloud/bin/update_config.py
|
systemvm/patches/debian/config/opt/cloud/bin/update_config.py
|
#!/usr/bin/python
import syslog
import sys
# first commandline argument should be the file to process
if len(sys.argv) != 2:
print "Invalid usage"
sys.exit(1)
json_file = sys.argv[1]
syslog.syslog(sys.argv[0] + " called for file " + json_file)
|
Add a placeholder for the trigger file
|
Add a placeholder for the trigger file
|
Python
|
apache-2.0
|
GabrielBrascher/cloudstack,GabrielBrascher/cloudstack,resmo/cloudstack,resmo/cloudstack,wido/cloudstack,jcshen007/cloudstack,resmo/cloudstack,jcshen007/cloudstack,wido/cloudstack,DaanHoogland/cloudstack,wido/cloudstack,GabrielBrascher/cloudstack,jcshen007/cloudstack,jcshen007/cloudstack,DaanHoogland/cloudstack,resmo/cloudstack,DaanHoogland/cloudstack,resmo/cloudstack,DaanHoogland/cloudstack,wido/cloudstack,GabrielBrascher/cloudstack,wido/cloudstack,GabrielBrascher/cloudstack,jcshen007/cloudstack,jcshen007/cloudstack,DaanHoogland/cloudstack,DaanHoogland/cloudstack,DaanHoogland/cloudstack,resmo/cloudstack,wido/cloudstack,resmo/cloudstack,jcshen007/cloudstack,wido/cloudstack,GabrielBrascher/cloudstack,GabrielBrascher/cloudstack
|
Add a placeholder for the trigger file
|
#!/usr/bin/python
import syslog
import sys
# first commandline argument should be the file to process
if len(sys.argv) != 2:
print "Invalid usage"
sys.exit(1)
json_file = sys.argv[1]
syslog.syslog(sys.argv[0] + " called for file " + json_file)
|
<commit_before><commit_msg>Add a placeholder for the trigger file<commit_after>
|
#!/usr/bin/python
import syslog
import sys
# first commandline argument should be the file to process
if len(sys.argv) != 2:
print "Invalid usage"
sys.exit(1)
json_file = sys.argv[1]
syslog.syslog(sys.argv[0] + " called for file " + json_file)
|
Add a placeholder for the trigger file#!/usr/bin/python
import syslog
import sys
# first commandline argument should be the file to process
if len(sys.argv) != 2:
print "Invalid usage"
sys.exit(1)
json_file = sys.argv[1]
syslog.syslog(sys.argv[0] + " called for file " + json_file)
|
<commit_before><commit_msg>Add a placeholder for the trigger file<commit_after>#!/usr/bin/python
import syslog
import sys
# first commandline argument should be the file to process
if len(sys.argv) != 2:
print "Invalid usage"
sys.exit(1)
json_file = sys.argv[1]
syslog.syslog(sys.argv[0] + " called for file " + json_file)
|
|
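The same placeholder in Python 3 with argparse, as a sketch; note the syslog module is Unix-only.

import argparse
import syslog

parser = argparse.ArgumentParser()
parser.add_argument("json_file", help="configuration file to process")
args = parser.parse_args()
syslog.syslog("update_config called for file " + args.json_file)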
962f7f1184803401b9d8cb3423cd574d55bcf9e4
|
tools/tree.py
|
tools/tree.py
|
import sys
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.log import setLogLevel
from mininet.cli import CLI
from mininet.topolib import TreeTopo
from mininet.node import RemoteController, OVSKernelSwitch
myTree = TreeTopo(depth=3,fanout=4)
def run( controllers ):
net = Mininet( topo=myTree, controller=None, autoSetMacs=True )
ctrl_count = 0
for controllerIP in controllers:
net.addController( 'c%d' % ctrl_count, RemoteController, ip=controllerIP )
ctrl_count += 1
net.start()
CLI( net )
    net.stop()
if __name__ == '__main__':
    setLogLevel( 'info' )
    if len(sys.argv) > 1:
        controllers = sys.argv[ 1: ]
    else:
        print 'Usage: sudo python tree.py <controller-ip> [<controller-ip> ...]'
exit(1)
run(controllers)
|
Add mininet script for test topology
|
Add mininet script for test topology
|
Python
|
apache-2.0
|
sangyun-han/sidekick,sangyun-han/sidekick,sangyun-han/sidekick,sangyun-han/sidekick
|
Add mininet script for test topology
|
import sys
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.log import setLogLevel
from mininet.cli import CLI
from mininet.topolib import TreeTopo
from mininet.node import RemoteController, OVSKernelSwitch
myTree = TreeTopo(depth=3,fanout=4)
def run( controllers ):
net = Mininet( topo=myTree, controller=None, autoSetMacs=True )
ctrl_count = 0
for controllerIP in controllers:
net.addController( 'c%d' % ctrl_count, RemoteController, ip=controllerIP )
ctrl_count += 1
net.start()
CLI( net )
    net.stop()
if __name__ == '__main__':
    setLogLevel( 'info' )
    if len(sys.argv) > 1:
        controllers = sys.argv[ 1: ]
    else:
        print 'Usage: sudo python tree.py <controller-ip> [<controller-ip> ...]'
exit(1)
run(controllers)
|
<commit_before><commit_msg>Add mininet script for test topology<commit_after>
|
import sys
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.log import setLogLevel
from mininet.cli import CLI
from mininet.topolib import TreeTopo
from mininet.node import RemoteController, OVSKernelSwitch
myTree = TreeTopo(depth=3,fanout=4)
def run( controllers ):
net = Mininet( topo=myTree, controller=None, autoSetMacs=True )
ctrl_count = 0
for controllerIP in controllers:
net.addController( 'c%d' % ctrl_count, RemoteController, ip=controllerIP )
ctrl_count += 1
net.start()
CLI( net )
    net.stop()
if __name__ == '__main__':
    setLogLevel( 'info' )
    if len(sys.argv) > 1:
        controllers = sys.argv[ 1: ]
    else:
        print 'Usage: sudo python tree.py <controller-ip> [<controller-ip> ...]'
exit(1)
run(controllers)
|
Add mininet script for test topologyimport sys
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.log import setLogLevel
from mininet.cli import CLI
from mininet.topolib import TreeTopo
from mininet.node import RemoteController, OVSKernelSwitch
myTree = TreeTopo(depth=3,fanout=4)
def run( controllers ):
net = Mininet( topo=myTree, controller=None, autoSetMacs=True )
ctrl_count = 0
for controllerIP in controllers:
net.addController( 'c%d' % ctrl_count, RemoteController, ip=controllerIP )
ctrl_count += 1
net.start()
CLI( net )
    net.stop()
if __name__ == '__main__':
    setLogLevel( 'info' )
    if len(sys.argv) > 1:
        controllers = sys.argv[ 1: ]
    else:
        print 'Usage: sudo python tree.py <controller-ip> [<controller-ip> ...]'
exit(1)
run(controllers)
|
<commit_before><commit_msg>Add mininet script for test topology<commit_after>import sys
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.log import setLogLevel
from mininet.cli import CLI
from mininet.topolib import TreeTopo
from mininet.node import RemoteController, OVSKernelSwitch
myTree = TreeTopo(depth=3,fanout=4)
def run( controllers ):
net = Mininet( topo=myTree, controller=None, autoSetMacs=True )
ctrl_count = 0
for controllerIP in controllers:
net.addController( 'c%d' % ctrl_count, RemoteController, ip=controllerIP )
ctrl_count += 1
net.start()
CLI( net )
    net.stop()
if __name__ == '__main__':
    setLogLevel( 'info' )
    if len(sys.argv) > 1:
        controllers = sys.argv[ 1: ]
    else:
        print 'Usage: sudo python tree.py <controller-ip> [<controller-ip> ...]'
exit(1)
run(controllers)
|
|
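As a sanity check on the topology size, TreeTopo(depth=3, fanout=4) gives fanout**depth hosts and a geometric-series number of switches; the arithmetic below needs no Mininet install.

depth, fanout = 3, 4
hosts = fanout ** depth                           # 4**3 = 64 leaf hosts
switches = (fanout ** depth - 1) // (fanout - 1)  # 1 + 4 + 16 = 21 switches
print(hosts, switches)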
9e27800d4e94b9f13810b9c939bd62ef541f7f12
|
AddDataTest.py
|
AddDataTest.py
|
__author__ = 'chuqiao'
import script
script.addDataToSolrFromUrl("http://www.elixir-europe.org:8080/events", "http://www.elixir-europe.org:8080/events")
script.addDataToSolrFromUrl("http://localhost/ep/events?state=published&field_type_tid=All", "http://localhost/ep/events")
|
Create Add Data Test script
|
Create Add Data Test script
|
Python
|
mit
|
elixirhub/events-portal-scraping-scripts
|
Create Add Data Test script
|
__author__ = 'chuqiao'
import script
script.addDataToSolrFromUrl("http://www.elixir-europe.org:8080/events", "http://www.elixir-europe.org:8080/events")
script.addDataToSolrFromUrl("http://localhost/ep/events?state=published&field_type_tid=All", "http://localhost/ep/events")
|
<commit_before><commit_msg>Create Add Data Test script<commit_after>
|
__author__ = 'chuqiao'
import script
script.addDataToSolrFromUrl("http://www.elixir-europe.org:8080/events", "http://www.elixir-europe.org:8080/events")
script.addDataToSolrFromUrl("http://localhost/ep/events?state=published&field_type_tid=All", "http://localhost/ep/events")
|
Create Add Data Test script__author__ = 'chuqiao'
import script
script.addDataToSolrFromUrl("http://www.elixir-europe.org:8080/events", "http://www.elixir-europe.org:8080/events")
script.addDataToSolrFromUrl("http://localhost/ep/events?state=published&field_type_tid=All", "http://localhost/ep/events")
|
<commit_before><commit_msg>Create Add Data Test script<commit_after>__author__ = 'chuqiao'
import script
script.addDataToSolrFromUrl("http://www.elixir-europe.org:8080/events", "http://www.elixir-europe.org:8080/events")
script.addDataToSolrFromUrl("http://localhost/ep/events?state=published&field_type_tid=All", "http://localhost/ep/events")
|
|
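The script module driven by this test is not part of the record. For reference, a hedged sketch of what an add-data-to-Solr helper usually boils down to; the Solr URL, core name and payload shape are assumptions, not the events-portal code.

import json
from urllib import request

def add_docs_to_solr(docs, core_url="http://localhost:8983/solr/events"):
    # POST JSON documents to Solr's update handler and commit immediately.
    req = request.Request(
        core_url + "/update?commit=true",
        data=json.dumps(docs).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    return request.urlopen(req).status

# add_docs_to_solr([{"id": "1", "title": "example event"}])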
6503a8fc3c9f47b5e33d7ad520ac48b0d452cf70
|
pyfr/bases.py
|
pyfr/bases.py
|
# -*- coding: utf-8 -*-
import sympy as sy
import numpy as np
import itertools
def lagrange_basis(points, sym):
"""Generates a basis of polynomials, :math:`l_i(x)`, such that
.. math::
l_i(x) =
\begin{cases}
0 & x\neq p_{i}\\
1 & x=p_{i}
\end{cases}
where :math:`p_i` is the i'th entry in *points*
"""
n = len(points)
return [sy.interpolating_poly(n, sym, points, (0,)*i + (1,) + (0,)*(n-i))
for i in xrange(n)]
def cart_prod_points(points, ndim):
"""Performs a cartesian product extension of *points* into *ndim*
For idiosyncratic reasons the counting order of indices is from
first to last, i.e. it is the first index that counts quickest,
followed by the second index and so on.
**Example**
>>> cart_prod_points([-1, 0, 1], 2)
array([[-1., -1.],
[ 0., -1.],
[ 1., -1.],
[-1., 0.],
[ 0., 0.],
[ 1., 0.],
[-1., 1.],
[ 0., 1.],
[ 1., 1.]])
"""
npoints = len(points)
cprodpts = np.empty((npoints,)*ndim + (ndim,))
for i,ax in enumerate(np.ix_(*(points,)*ndim)):
# -i-1 ensures we count first-to-last
cprodpts[...,-i-1] = ax
# Compact into an array of ndim component tuples
return cprodpts.reshape((np.prod(cprodpts.shape[:-1]), ndim))
def cart_prod_basis(points, dims, basisfn):
"""Performs a cartesian product extension of a basis
.. note::
This function adopts the same first-to-last counting order as
:func:`cart_prod_points` with the first index varying quickest.
**Example**
>>> import sympy as sy
>>> cb = cart_prod_basis([-1, 1], sy.symbols('p q'), lagrange_basis)
>>> cb[0]
(-p/2 + 1/2)*(-q/2 + 1/2)
>>> cb[0].subs(dict(p=-1, q=-1))
1
>>> cb[0].subs(dict(p=1, q=-1))
0
"""
# Evaluate the basis function in terms of each dimension (r,q,p)
basis = [basisfn(points, d) for d in reversed(dims)]
# Take the cartesian product of these and multiply the resulting tuples
return [np.prod(b) for b in itertools.product(*basis)]
|
Add a set of functions related to basis-computation.
|
Add a set of functions related to basis-computation.
|
Python
|
bsd-3-clause
|
BrianVermeire/PyFR,tjcorona/PyFR,tjcorona/PyFR,tjcorona/PyFR,Aerojspark/PyFR,iyer-arvind/PyFR
|
Add a set of functions related to basis-computation.
|
# -*- coding: utf-8 -*-
import sympy as sy
import numpy as np
import itertools
def lagrange_basis(points, sym):
"""Generates a basis of polynomials, :math:`l_i(x)`, such that
.. math::
l_i(x) =
\begin{cases}
0 & x\neq p_{i}\\
1 & x=p_{i}
\end{cases}
where :math:`p_i` is the i'th entry in *points*
"""
n = len(points)
return [sy.interpolating_poly(n, sym, points, (0,)*i + (1,) + (0,)*(n-i))
for i in xrange(n)]
def cart_prod_points(points, ndim):
"""Performs a cartesian product extension of *points* into *ndim*
For idiosyncratic reasons the counting order of indices is from
first to last, i.e. it is the first index that counts quickest,
followed by the second index and so on.
**Example**
>>> cart_prod_points([-1, 0, 1], 2)
array([[-1., -1.],
[ 0., -1.],
[ 1., -1.],
[-1., 0.],
[ 0., 0.],
[ 1., 0.],
[-1., 1.],
[ 0., 1.],
[ 1., 1.]])
"""
npoints = len(points)
cprodpts = np.empty((npoints,)*ndim + (ndim,))
for i,ax in enumerate(np.ix_(*(points,)*ndim)):
# -i-1 ensures we count first-to-last
cprodpts[...,-i-1] = ax
# Compact into an array of ndim component tuples
return cprodpts.reshape((np.prod(cprodpts.shape[:-1]), ndim))
def cart_prod_basis(points, dims, basisfn):
"""Performs a cartesian product extension of a basis
.. note::
This function adopts the same first-to-last counting order as
:func:`cart_prod_points` with the first index varying quickest.
**Example**
>>> import sympy as sy
>>> cb = cart_prod_basis([-1, 1], sy.symbols('p q'), lagrange_basis)
>>> cb[0]
(-p/2 + 1/2)*(-q/2 + 1/2)
>>> cb[0].subs(dict(p=-1, q=-1))
1
>>> cb[0].subs(dict(p=1, q=-1))
0
"""
# Evaluate the basis function in terms of each dimension (r,q,p)
basis = [basisfn(points, d) for d in reversed(dims)]
# Take the cartesian product of these and multiply the resulting tuples
return [np.prod(b) for b in itertools.product(*basis)]
|
<commit_before><commit_msg>Add a set of functions related to basis-computation.<commit_after>
|
# -*- coding: utf-8 -*-
import sympy as sy
import numpy as np
import itertools
def lagrange_basis(points, sym):
"""Generates a basis of polynomials, :math:`l_i(x)`, such that
.. math::
l_i(x) =
\begin{cases}
0 & x\neq p_{i}\\
1 & x=p_{i}
\end{cases}
where :math:`p_i` is the i'th entry in *points*
"""
n = len(points)
return [sy.interpolating_poly(n, sym, points, (0,)*i + (1,) + (0,)*(n-i))
for i in xrange(n)]
def cart_prod_points(points, ndim):
"""Performs a cartesian product extension of *points* into *ndim*
For idiosyncratic reasons the counting order of indices is from
first to last, i.e. it is the first index that counts quickest,
followed by the second index and so on.
**Example**
>>> cart_prod_points([-1, 0, 1], 2)
array([[-1., -1.],
[ 0., -1.],
[ 1., -1.],
[-1., 0.],
[ 0., 0.],
[ 1., 0.],
[-1., 1.],
[ 0., 1.],
[ 1., 1.]])
"""
npoints = len(points)
cprodpts = np.empty((npoints,)*ndim + (ndim,))
for i,ax in enumerate(np.ix_(*(points,)*ndim)):
# -i-1 ensures we count first-to-last
cprodpts[...,-i-1] = ax
# Compact into an array of ndim component tuples
return cprodpts.reshape((np.prod(cprodpts.shape[:-1]), ndim))
def cart_prod_basis(points, dims, basisfn):
"""Performs a cartesian product extension of a basis
.. note::
This function adopts the same first-to-last counting order as
:func:`cart_prod_points` with the first index varying quickest.
**Example**
>>> import sympy as sy
>>> cb = cart_prod_basis([-1, 1], sy.symbols('p q'), lagrange_basis)
>>> cb[0]
(-p/2 + 1/2)*(-q/2 + 1/2)
>>> cb[0].subs(dict(p=-1, q=-1))
1
>>> cb[0].subs(dict(p=1, q=-1))
0
"""
# Evaluate the basis function in terms of each dimension (r,q,p)
basis = [basisfn(points, d) for d in reversed(dims)]
# Take the cartesian product of these and multiply the resulting tuples
return [np.prod(b) for b in itertools.product(*basis)]
|
Add a set of functions related to basis-computation.# -*- coding: utf-8 -*-
import sympy as sy
import numpy as np
import itertools
def lagrange_basis(points, sym):
"""Generates a basis of polynomials, :math:`l_i(x)`, such that
.. math::
l_i(x) =
\begin{cases}
0 & x\neq p_{i}\\
1 & x=p_{i}
\end{cases}
where :math:`p_i` is the i'th entry in *points*
"""
n = len(points)
return [sy.interpolating_poly(n, sym, points, (0,)*i + (1,) + (0,)*(n-i))
for i in xrange(n)]
def cart_prod_points(points, ndim):
"""Performs a cartesian product extension of *points* into *ndim*
For idiosyncratic reasons the counting order of indices is from
first to last, i.e. it is the first index that counts quickest,
followed by the second index and so on.
**Example**
>>> cart_prod_points([-1, 0, 1], 2)
array([[-1., -1.],
[ 0., -1.],
[ 1., -1.],
[-1., 0.],
[ 0., 0.],
[ 1., 0.],
[-1., 1.],
[ 0., 1.],
[ 1., 1.]])
"""
npoints = len(points)
cprodpts = np.empty((npoints,)*ndim + (ndim,))
for i,ax in enumerate(np.ix_(*(points,)*ndim)):
# -i-1 ensures we count first-to-last
cprodpts[...,-i-1] = ax
# Compact into an array of ndim component tuples
return cprodpts.reshape((np.prod(cprodpts.shape[:-1]), ndim))
def cart_prod_basis(points, dims, basisfn):
"""Performs a cartesian product extension of a basis
.. note::
This function adopts the same first-to-last counting order as
:func:`cart_prod_points` with the first index varying quickest.
**Example**
>>> import sympy as sy
>>> cb = cart_prod_basis([-1, 1], sy.symbols('p q'), lagrange_basis)
>>> cb[0]
(-p/2 + 1/2)*(-q/2 + 1/2)
>>> cb[0].subs(dict(p=-1, q=-1))
1
>>> cb[0].subs(dict(p=1, q=-1))
0
"""
# Evaluate the basis function in terms of each dimension (r,q,p)
basis = [basisfn(points, d) for d in reversed(dims)]
# Take the cartesian product of these and multiply the resulting tuples
return [np.prod(b) for b in itertools.product(*basis)]
|
<commit_before><commit_msg>Add a set of functions related to basis-computation.<commit_after># -*- coding: utf-8 -*-
import sympy as sy
import numpy as np
import itertools
def lagrange_basis(points, sym):
"""Generates a basis of polynomials, :math:`l_i(x)`, such that
.. math::
l_i(x) =
\begin{cases}
0 & x\neq p_{i}\\
1 & x=p_{i}
\end{cases}
where :math:`p_i` is the i'th entry in *points*
"""
n = len(points)
return [sy.interpolating_poly(n, sym, points, (0,)*i + (1,) + (0,)*(n-i))
for i in xrange(n)]
def cart_prod_points(points, ndim):
"""Performs a cartesian product extension of *points* into *ndim*
For idiosyncratic reasons the counting order of indices is from
first to last, i.e. it is the first index that counts quickest,
followed by the second index and so on.
**Example**
>>> cart_prod_points([-1, 0, 1], 2)
array([[-1., -1.],
[ 0., -1.],
[ 1., -1.],
[-1., 0.],
[ 0., 0.],
[ 1., 0.],
[-1., 1.],
[ 0., 1.],
[ 1., 1.]])
"""
npoints = len(points)
cprodpts = np.empty((npoints,)*ndim + (ndim,))
for i,ax in enumerate(np.ix_(*(points,)*ndim)):
# -i-1 ensures we count first-to-last
cprodpts[...,-i-1] = ax
# Compact into an array of ndim component tuples
return cprodpts.reshape((np.prod(cprodpts.shape[:-1]), ndim))
def cart_prod_basis(points, dims, basisfn):
"""Performs a cartesian product extension of a basis
.. note::
This function adopts the same first-to-last counting order as
:func:`cart_prod_points` with the first index varying quickest.
**Example**
>>> import sympy as sy
>>> cb = cart_prod_basis([-1, 1], sy.symbols('p q'), lagrange_basis)
>>> cb[0]
(-p/2 + 1/2)*(-q/2 + 1/2)
>>> cb[0].subs(dict(p=-1, q=-1))
1
>>> cb[0].subs(dict(p=1, q=-1))
0
"""
# Evaluate the basis function in terms of each dimension (r,q,p)
basis = [basisfn(points, d) for d in reversed(dims)]
# Take the cartesian product of these and multiply the resulting tuples
return [np.prod(b) for b in itertools.product(*basis)]
|
|
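A quick check of the lagrange_basis construction above (under Python 3, where xrange becomes range); sympy is the only dependency. Note the original passes a Y tuple of length n+1, whose trailing zero interpolating_poly simply ignores; the sketch uses exactly n entries.

import sympy as sy

x = sy.Symbol('x')
points = [-1, 0, 1]
n = len(points)
basis = [sy.interpolating_poly(n, x, points, (0,)*i + (1,) + (0,)*(n - i - 1))
         for i in range(n)]
# Each l_i is 1 at points[i] and 0 at every other node
assert basis[0].subs(x, -1) == 1 and basis[0].subs(x, 0) == 0
half = sy.Rational(1, 2)
assert sum(b.subs(x, half) for b in basis) == 1   # partition of unity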
5443ceecfafea3c78a7153eaa2d38febdb54eb3e
|
tests/test_digest.py
|
tests/test_digest.py
|
from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestDigest(FlexGetBase):
__yaml__ = """
tasks:
digest 1:
mock:
- title: entry 1
accept_all: yes
digest: aoeu
digest 2:
mock:
- title: entry 2
accept_all: yes
digest: aoeu
emit digest:
emit_digest:
list: aoeu
"""
def test_multiple_task_merging(self):
self.execute_task('digest 1')
self.execute_task('digest 2')
self.execute_task('emit digest')
assert len(self.task.entries) == 2
|
Add basic test for digest plugin
|
Add basic test for digest plugin
|
Python
|
mit
|
antivirtel/Flexget,qvazzler/Flexget,tsnoam/Flexget,Danfocus/Flexget,gazpachoking/Flexget,spencerjanssen/Flexget,malkavi/Flexget,crawln45/Flexget,antivirtel/Flexget,crawln45/Flexget,ZefQ/Flexget,vfrc2/Flexget,tobinjt/Flexget,tobinjt/Flexget,LynxyssCZ/Flexget,tobinjt/Flexget,ibrahimkarahan/Flexget,tarzasai/Flexget,drwyrm/Flexget,LynxyssCZ/Flexget,tarzasai/Flexget,jawilson/Flexget,Danfocus/Flexget,cvium/Flexget,ibrahimkarahan/Flexget,ianstalk/Flexget,ratoaq2/Flexget,thalamus/Flexget,tsnoam/Flexget,cvium/Flexget,JorisDeRieck/Flexget,Flexget/Flexget,ratoaq2/Flexget,xfouloux/Flexget,Danfocus/Flexget,Flexget/Flexget,malkavi/Flexget,jacobmetrick/Flexget,jacobmetrick/Flexget,v17al/Flexget,Flexget/Flexget,malkavi/Flexget,dsemi/Flexget,poulpito/Flexget,LynxyssCZ/Flexget,Danfocus/Flexget,cvium/Flexget,ZefQ/Flexget,crawln45/Flexget,dsemi/Flexget,spencerjanssen/Flexget,gazpachoking/Flexget,JorisDeRieck/Flexget,ianstalk/Flexget,jawilson/Flexget,LynxyssCZ/Flexget,grrr2/Flexget,sean797/Flexget,JorisDeRieck/Flexget,xfouloux/Flexget,jawilson/Flexget,tsnoam/Flexget,vfrc2/Flexget,poulpito/Flexget,OmgOhnoes/Flexget,ianstalk/Flexget,OmgOhnoes/Flexget,Flexget/Flexget,tobinjt/Flexget,Pretagonist/Flexget,qk4l/Flexget,tarzasai/Flexget,Pretagonist/Flexget,thalamus/Flexget,offbyone/Flexget,lildadou/Flexget,lildadou/Flexget,xfouloux/Flexget,drwyrm/Flexget,poulpito/Flexget,patsissons/Flexget,antivirtel/Flexget,spencerjanssen/Flexget,grrr2/Flexget,malkavi/Flexget,Pretagonist/Flexget,oxc/Flexget,patsissons/Flexget,jacobmetrick/Flexget,offbyone/Flexget,patsissons/Flexget,qvazzler/Flexget,jawilson/Flexget,grrr2/Flexget,v17al/Flexget,qvazzler/Flexget,JorisDeRieck/Flexget,OmgOhnoes/Flexget,dsemi/Flexget,qk4l/Flexget,lildadou/Flexget,v17al/Flexget,drwyrm/Flexget,oxc/Flexget,qk4l/Flexget,oxc/Flexget,ibrahimkarahan/Flexget,offbyone/Flexget,sean797/Flexget,ZefQ/Flexget,sean797/Flexget,thalamus/Flexget,ratoaq2/Flexget,crawln45/Flexget,vfrc2/Flexget
|
Add basic test for digest plugin
|
from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestDigest(FlexGetBase):
__yaml__ = """
tasks:
digest 1:
mock:
- title: entry 1
accept_all: yes
digest: aoeu
digest 2:
mock:
- title: entry 2
accept_all: yes
digest: aoeu
emit digest:
emit_digest:
list: aoeu
"""
def test_multiple_task_merging(self):
self.execute_task('digest 1')
self.execute_task('digest 2')
self.execute_task('emit digest')
assert len(self.task.entries) == 2
|
<commit_before><commit_msg>Add basic test for digest plugin<commit_after>
|
from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestDigest(FlexGetBase):
__yaml__ = """
tasks:
digest 1:
mock:
- title: entry 1
accept_all: yes
digest: aoeu
digest 2:
mock:
- title: entry 2
accept_all: yes
digest: aoeu
emit digest:
emit_digest:
list: aoeu
"""
def test_multiple_task_merging(self):
self.execute_task('digest 1')
self.execute_task('digest 2')
self.execute_task('emit digest')
assert len(self.task.entries) == 2
|
Add basic test for digest pluginfrom __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestDigest(FlexGetBase):
__yaml__ = """
tasks:
digest 1:
mock:
- title: entry 1
accept_all: yes
digest: aoeu
digest 2:
mock:
- title: entry 2
accept_all: yes
digest: aoeu
emit digest:
emit_digest:
list: aoeu
"""
def test_multiple_task_merging(self):
self.execute_task('digest 1')
self.execute_task('digest 2')
self.execute_task('emit digest')
assert len(self.task.entries) == 2
|
<commit_before><commit_msg>Add basic test for digest plugin<commit_after>from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestDigest(FlexGetBase):
__yaml__ = """
tasks:
digest 1:
mock:
- title: entry 1
accept_all: yes
digest: aoeu
digest 2:
mock:
- title: entry 2
accept_all: yes
digest: aoeu
emit digest:
emit_digest:
list: aoeu
"""
def test_multiple_task_merging(self):
self.execute_task('digest 1')
self.execute_task('digest 2')
self.execute_task('emit digest')
assert len(self.task.entries) == 2
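# Sketch of the expected flow (reviewer note, not part of the commit):
# both digest tasks push their accepted entry into the 'aoeu' list, and
# 'emit digest' re-emits everything stored there, hence the count of 2.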
|
|
2a5a735d9440ca9998e80239b41410a35b0c972d
|
tools/fixup_translations.py
|
tools/fixup_translations.py
|
# Validate that all entries in the .pot are in every .po. Only the .pot is updated so we can detect
# if a translation was added to the source but isn't in a .po. This ensures translators can grab
# complete files to work on.
import git
import sys
import polib
po_filenames = sys.argv[1:]
repo = git.Repo()
NO_TRANSLATION_WHITELIST = ["ffi_prep_closure_loc"]
bad_commits = {}
for po_filename in po_filenames:
print("Checking", po_filename)
commits = repo.iter_commits(paths=po_filename, reverse=True, topo_order=True)
first_translations = None
fixed_ids = set()
for commit in commits:
try:
blob = commit.tree / po_filename
except KeyError:
continue
try:
current_file = polib.pofile(blob.data_stream.read().decode("utf-8"))
except OSError:
print("skipping invalid po in", commit)
continue
if not first_translations:
first_translations = current_file
continue
print(commit.authored_date, commit)
first_translations.metadata = current_file.metadata
for entry in first_translations:
if entry.msgid == "soft reboot\n":
continue
newer_entry = current_file.find(entry.msgid)
if newer_entry and entry.msgstr != newer_entry.msgstr:
if newer_entry.msgstr != "" and (newer_entry.msgstr != entry.msgid or entry.msgid in NO_TRANSLATION_WHITELIST):
entry.merge(newer_entry)
entry.msgstr = newer_entry.msgstr
elif entry.msgid not in fixed_ids:
if commit not in bad_commits:
bad_commits[commit] = set()
bad_commits[commit].add(po_filename)
fixed_ids.add(entry.msgid)
print(entry.msgid, "\"" + entry.msgstr + "\"", "\"" + newer_entry.msgstr + "\"")
first_translations.save(po_filename)
print()
for commit in bad_commits:
files = bad_commits[commit]
print(commit)
for file in files:
print("\t",file)
|
Add utility to remerge translations
|
Add utility to remerge translations
|
Python
|
mit
|
adafruit/circuitpython,adafruit/circuitpython,adafruit/micropython,adafruit/circuitpython,adafruit/circuitpython,adafruit/micropython,adafruit/micropython,adafruit/circuitpython,adafruit/micropython,adafruit/micropython,adafruit/circuitpython
|
Add utility to remerge translations
|
# Validate that all entries in the .pot are in every .po. Only the .pot is updated so we can detect
# if a translation was added to the source but isn't in a .po. This ensures translators can grab
# complete files to work on.
import git
import sys
import polib
po_filenames = sys.argv[1:]
repo = git.Repo()
NO_TRANSLATION_WHITELIST = ["ffi_prep_closure_loc"]
bad_commits = {}
for po_filename in po_filenames:
print("Checking", po_filename)
commits = repo.iter_commits(paths=po_filename, reverse=True, topo_order=True)
first_translations = None
fixed_ids = set()
for commit in commits:
try:
blob = commit.tree / po_filename
except KeyError:
continue
try:
current_file = polib.pofile(blob.data_stream.read().decode("utf-8"))
except OSError:
print("skipping invalid po in", commit)
continue
if not first_translations:
first_translations = current_file
continue
print(commit.authored_date, commit)
first_translations.metadata = current_file.metadata
for entry in first_translations:
if entry.msgid == "soft reboot\n":
continue
newer_entry = current_file.find(entry.msgid)
if newer_entry and entry.msgstr != newer_entry.msgstr:
if newer_entry.msgstr != "" and (newer_entry.msgstr != entry.msgid or entry.msgid in NO_TRANSLATION_WHITELIST):
entry.merge(newer_entry)
entry.msgstr = newer_entry.msgstr
elif entry.msgid not in fixed_ids:
if commit not in bad_commits:
bad_commits[commit] = set()
bad_commits[commit].add(po_filename)
fixed_ids.add(entry.msgid)
print(entry.msgid, "\"" + entry.msgstr + "\"", "\"" + newer_entry.msgstr + "\"")
first_translations.save(po_filename)
print()
for commit in bad_commits:
files = bad_commits[commit]
print(commit)
for file in files:
print("\t",file)
|
<commit_before><commit_msg>Add utility to remerge translations<commit_after>
|
# Validate that all entries in the .pot are in every .po. Only the .pot is updated so we can detect
# if a translation was added to the source but isn't in a .po. This ensures translators can grab
# complete files to work on.
import git
import sys
import polib
po_filenames = sys.argv[1:]
repo = git.Repo()
NO_TRANSLATION_WHITELIST = ["ffi_prep_closure_loc"]
bad_commits = {}
for po_filename in po_filenames:
print("Checking", po_filename)
commits = repo.iter_commits(paths=po_filename, reverse=True, topo_order=True)
first_translations = None
fixed_ids = set()
for commit in commits:
try:
blob = commit.tree / po_filename
except KeyError:
continue
try:
current_file = polib.pofile(blob.data_stream.read().decode("utf-8"))
except OSError:
print("skipping invalid po in", commit)
continue
if not first_translations:
first_translations = current_file
continue
print(commit.authored_date, commit)
first_translations.metadata = current_file.metadata
for entry in first_translations:
if entry.msgid == "soft reboot\n":
continue
newer_entry = current_file.find(entry.msgid)
if newer_entry and entry.msgstr != newer_entry.msgstr:
if newer_entry.msgstr != "" and (newer_entry.msgstr != entry.msgid or entry.msgid in NO_TRANSLATION_WHITELIST):
entry.merge(newer_entry)
entry.msgstr = newer_entry.msgstr
elif entry.msgid not in fixed_ids:
if commit not in bad_commits:
bad_commits[commit] = set()
bad_commits[commit].add(po_filename)
fixed_ids.add(entry.msgid)
print(entry.msgid, "\"" + entry.msgstr + "\"", "\"" + newer_entry.msgstr + "\"")
first_translations.save(po_filename)
print()
for commit in bad_commits:
files = bad_commits[commit]
print(commit)
for file in files:
print("\t",file)
|
Add utility to remerge translations# Validate that all entries in the .pot are in every .po. Only the .pot is updated so we can detect
# if a translation was added to the source but isn't in a .po. This ensures translators can grab
# complete files to work on.
import git
import sys
import polib
po_filenames = sys.argv[1:]
repo = git.Repo()
NO_TRANSLATION_WHITELIST = ["ffi_prep_closure_loc"]
bad_commits = {}
for po_filename in po_filenames:
print("Checking", po_filename)
commits = repo.iter_commits(paths=po_filename, reverse=True, topo_order=True)
first_translations = None
fixed_ids = set()
for commit in commits:
try:
blob = commit.tree / po_filename
except KeyError:
continue
try:
current_file = polib.pofile(blob.data_stream.read().decode("utf-8"))
except OSError:
print("skipping invalid po in", commit)
continue
if not first_translations:
first_translations = current_file
continue
print(commit.authored_date, commit)
first_translations.metadata = current_file.metadata
for entry in first_translations:
if entry.msgid == "soft reboot\n":
continue
newer_entry = current_file.find(entry.msgid)
if newer_entry and entry.msgstr != newer_entry.msgstr:
if newer_entry.msgstr != "" and (newer_entry.msgstr != entry.msgid or entry.msgid in NO_TRANSLATION_WHITELIST):
entry.merge(newer_entry)
entry.msgstr = newer_entry.msgstr
elif entry.msgid not in fixed_ids:
if commit not in bad_commits:
bad_commits[commit] = set()
bad_commits[commit].add(po_filename)
fixed_ids.add(entry.msgid)
print(entry.msgid, "\"" + entry.msgstr + "\"", "\"" + newer_entry.msgstr + "\"")
first_translations.save(po_filename)
print()
for commit in bad_commits:
files = bad_commits[commit]
print(commit)
for file in files:
print("\t",file)
|
<commit_before><commit_msg>Add utility to remerge translations<commit_after># Validate that all entries in the .pot are in every .po. Only the .pot is updated so we can detect
# if a translation was added to the source but isn't in a .po. This ensures translators can grab
# complete files to work on.
import git
import sys
import polib
po_filenames = sys.argv[1:]
repo = git.Repo()
NO_TRANSLATION_WHITELIST = ["ffi_prep_closure_loc"]
bad_commits = {}
for po_filename in po_filenames:
print("Checking", po_filename)
commits = repo.iter_commits(paths=po_filename, reverse=True, topo_order=True)
first_translations = None
fixed_ids = set()
for commit in commits:
try:
blob = commit.tree / po_filename
except KeyError:
continue
try:
current_file = polib.pofile(blob.data_stream.read().decode("utf-8"))
except OSError:
print("skipping invalid po in", commit)
continue
if not first_translations:
first_translations = current_file
continue
print(commit.authored_date, commit)
first_translations.metadata = current_file.metadata
for entry in first_translations:
if entry.msgid == "soft reboot\n":
continue
newer_entry = current_file.find(entry.msgid)
if newer_entry and entry.msgstr != newer_entry.msgstr:
if newer_entry.msgstr != "" and (newer_entry.msgstr != entry.msgid or entry.msgid in NO_TRANSLATION_WHITELIST):
entry.merge(newer_entry)
entry.msgstr = newer_entry.msgstr
elif entry.msgid not in fixed_ids:
if commit not in bad_commits:
bad_commits[commit] = set()
bad_commits[commit].add(po_filename)
fixed_ids.add(entry.msgid)
print(entry.msgid, "\"" + entry.msgstr + "\"", "\"" + newer_entry.msgstr + "\"")
first_translations.save(po_filename)
print()
for commit in bad_commits:
files = bad_commits[commit]
print(commit)
for file in files:
print("\t",file)
|
|
dc818ce216bde42fdf60852e2320aaf3e6e45156
|
cli2phone.py
|
cli2phone.py
|
#!/usr/bin/env python
"""cli2phone pushes messages from the command line to your android phone.
Requires Android 2.2 or newer, and the chrometophone application installed.
See: http://code.google.com/p/chrometophone/
Usage: cli2phone URL
"""
import sys
import getopt
from auth import Auth
apiVersion = '5'
baseUrl = 'https://chrometophone.appspot.com/send?ver=' + apiVersion
def main(argv=None):
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
# process arguments
if len(args) == 0:
print __doc__
else:
for arg in args:
send_url(arg)
def send_url(url):
"""Sends a URL to the phone"""
params = {'url': url,
'title': '',
'sel': '',
'type': '',
'deviceType': 'ac2dm'}
auth = Auth()
auth.request(baseUrl, params)
if __name__ == "__main__":
sys.exit(main())
|
Add the main interface file.
|
Add the main interface file.
|
Python
|
mit
|
sklnd/cli2phone
|
Add the main interface file.
|
#!/usr/bin/env python
"""cli2phone pushes messages from the command line to your android phone.
Requires Android 2.2 or newer, and the chrometophone application installed.
See: http://code.google.com/p/chrometophone/
Usage: cli2phone URL
"""
import sys
import getopt
from auth import Auth
apiVersion = '5'
baseUrl = 'https://chrometophone.appspot.com/send?ver=' + apiVersion
def main(argv=None):
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
# process arguments
if len(args) == 0:
print __doc__
else:
for arg in args:
send_url(arg)
def send_url(url):
"""Sends a URL to the phone"""
params = {'url': url,
'title': '',
'sel': '',
'type': '',
'deviceType': 'ac2dm'}
auth = Auth()
auth.request(baseUrl, params)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add the main interface file.<commit_after>
|
#!/usr/bin/env python
"""cli2phone pushes messages from the command line to your android phone.
Requires Android 2.2 or newer, and the chrometophone application installed.
See: http://code.google.com/p/chrometophone/
Usage: cli2phone URL
"""
import sys
import getopt
from auth import Auth
apiVersion = '5'
baseUrl = 'https://chrometophone.appspot.com/send?ver=' + apiVersion
def main(argv=None):
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
# process arguments
if len(args) == 0:
print __doc__
else:
for arg in args:
send_url(arg)
def send_url(url):
"""Sends a URL to the phone"""
params = {'url': url,
'title': '',
'sel': '',
'type': '',
'deviceType': 'ac2dm'}
auth = Auth()
auth.request(baseUrl, params)
if __name__ == "__main__":
sys.exit(main())
|
Add the main interface file.#!/usr/bin/env python
"""cli2phone pushes messages from the command line to your android phone.
Requires Android 2.2 or newer, and the chrometophone application installed.
See: http://code.google.com/p/chrometophone/
Usage: cli2phone URL
"""
import sys
import getopt
from auth import Auth
apiVersion = '5'
baseUrl = 'https://chrometophone.appspot.com/send?ver=' + apiVersion
def main(argv=None):
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
# process arguments
if len(args) == 0:
print __doc__
else:
for arg in args:
send_url(arg)
def send_url(url):
"""Sends a URL to the phone"""
params = {'url': url,
'title': '',
'sel': '',
'type': '',
'deviceType': 'ac2dm'}
auth = Auth()
auth.request(baseUrl, params)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add the main interface file.<commit_after>#!/usr/bin/env python
"""cli2phone pushes messages from the command line to your android phone.
Requires Android 2.2 or newer, and the chrometophone application installed.
See: http://code.google.com/p/chrometophone/
Usage: cli2phone URL
"""
import sys
import getopt
from auth import Auth
apiVersion = '5'
baseUrl = 'https://chrometophone.appspot.com/send?ver=' + apiVersion
def main(argv=None):
if argv is None:
argv = sys.argv
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
# process arguments
if len(args) == 0:
print __doc__
else:
for arg in args:
send_url(arg)
def send_url(url):
"""Sends a URL to the phone"""
params = {'url': url,
'title': '',
'sel': '',
'type': '',
'deviceType': 'ac2dm'}
auth = Auth()
auth.request(baseUrl, params)
if __name__ == "__main__":
sys.exit(main())
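# Example use (a sketch): push a URL to the phone from the shell with
#   python cli2phone.py http://example.com
# Note this is Python 2 syntax (print statements, `except getopt.error, msg`).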
|
|
83ae6c1f17e7f744c81b53d02cfe8f058c5500da
|
CodeFights/mathPractice.py
|
CodeFights/mathPractice.py
|
#!/usr/local/bin/python
# Code Fights Math Practice Problem
from functools import reduce
def mathPractice(numbers):
return reduce(lambda acc, x: (acc + x[1] if x[0] % 2 else acc * x[1]),
enumerate(numbers), 1)
def main():
tests = [
[[1, 2, 3, 4, 5, 6], 71],
[[8, 9], 17],
[[0, 8, 15], 120],
[[3, 18, 5, 17, 7, 12, 3, 14], 2612],
[[9, 19, 2, 2, 7, 3, 0, 0, 6, 11, 14, 18, 11, 7, 9, 6, 8, 4, 13, 11],
1778151]
]
for t in tests:
res = mathPractice(t[0])
ans = t[1]
if ans == res:
print("PASSED: mathPractice({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: mathPractice({}) returned {}, answer: {}")
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights math practice problem
|
Solve Code Fights math practice problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights math practice problem
|
#!/usr/local/bin/python
# Code Fights Math Practice Problem
from functools import reduce
def mathPractice(numbers):
return reduce(lambda acc, x: (acc + x[1] if x[0] % 2 else acc * x[1]),
enumerate(numbers), 1)
def main():
tests = [
[[1, 2, 3, 4, 5, 6], 71],
[[8, 9], 17],
[[0, 8, 15], 120],
[[3, 18, 5, 17, 7, 12, 3, 14], 2612],
[[9, 19, 2, 2, 7, 3, 0, 0, 6, 11, 14, 18, 11, 7, 9, 6, 8, 4, 13, 11],
1778151]
]
for t in tests:
res = mathPractice(t[0])
ans = t[1]
if ans == res:
print("PASSED: mathPractice({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: mathPractice({}) returned {}, answer: {}")
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights math practice problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Math Practice Problem
from functools import reduce
def mathPractice(numbers):
return reduce(lambda acc, x: (acc + x[1] if x[0] % 2 else acc * x[1]),
enumerate(numbers), 1)
def main():
tests = [
[[1, 2, 3, 4, 5, 6], 71],
[[8, 9], 17],
[[0, 8, 15], 120],
[[3, 18, 5, 17, 7, 12, 3, 14], 2612],
[[9, 19, 2, 2, 7, 3, 0, 0, 6, 11, 14, 18, 11, 7, 9, 6, 8, 4, 13, 11],
1778151]
]
for t in tests:
res = mathPractice(t[0])
ans = t[1]
if ans == res:
print("PASSED: mathPractice({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: mathPractice({}) returned {}, answer: {}")
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights math practice problem#!/usr/local/bin/python
# Code Fights Math Practice Problem
from functools import reduce
def mathPractice(numbers):
return reduce(lambda acc, x: (acc + x[1] if x[0] % 2 else acc * x[1]),
enumerate(numbers), 1)
def main():
tests = [
[[1, 2, 3, 4, 5, 6], 71],
[[8, 9], 17],
[[0, 8, 15], 120],
[[3, 18, 5, 17, 7, 12, 3, 14], 2612],
[[9, 19, 2, 2, 7, 3, 0, 0, 6, 11, 14, 18, 11, 7, 9, 6, 8, 4, 13, 11],
1778151]
]
for t in tests:
res = mathPractice(t[0])
ans = t[1]
if ans == res:
print("PASSED: mathPractice({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: mathPractice({}) returned {}, answer: {}")
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights math practice problem<commit_after>#!/usr/local/bin/python
# Code Fights Math Practice Problem
from functools import reduce
def mathPractice(numbers):
return reduce(lambda acc, x: (acc + x[1] if x[0] % 2 else acc * x[1]),
enumerate(numbers), 1)
def main():
tests = [
[[1, 2, 3, 4, 5, 6], 71],
[[8, 9], 17],
[[0, 8, 15], 120],
[[3, 18, 5, 17, 7, 12, 3, 14], 2612],
[[9, 19, 2, 2, 7, 3, 0, 0, 6, 11, 14, 18, 11, 7, 9, 6, 8, 4, 13, 11],
1778151]
]
for t in tests:
res = mathPractice(t[0])
ans = t[1]
if ans == res:
print("PASSED: mathPractice({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: mathPractice({}) returned {}, answer: {}")
.format(t[0], res, ans))
if __name__ == '__main__':
main()
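# Worked example (commentary, not in the original commit): with the fold
# starting at 1, even indices multiply and odd indices add, so
# mathPractice([1, 2, 3, 4, 5, 6]) = ((((1*1 + 2)*3 + 4)*5) + 6) = 71.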
|
|
4c80716c799f6fd6e8d3f28f840ebf8443973931
|
py/partition-equal-subset-sum.py
|
py/partition-equal-subset-sum.py
|
class Solution(object):
def canPartition(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
s = sum(nums)
if s % 2 != 0:
return False
possible = [True] + [False] * (s / 2 - 1)
for n in nums:
if n > s / 2:
return False
if possible[s / 2 - n]:
return True
for v in xrange(s / 2 - 1, n - 1, -1):
possible[v] |= possible[v - n]
return False
|
Add py solution for 416. Partition Equal Subset Sum
|
Add py solution for 416. Partition Equal Subset Sum
416. Partition Equal Subset Sum: https://leetcode.com/problems/partition-equal-subset-sum/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 416. Partition Equal Subset Sum
416. Partition Equal Subset Sum: https://leetcode.com/problems/partition-equal-subset-sum/
|
class Solution(object):
def canPartition(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
s = sum(nums)
if s % 2 != 0:
return False
possible = [True] + [False] * (s / 2 - 1)
for n in nums:
if n > s / 2:
return False
if possible[s / 2 - n]:
return True
for v in xrange(s / 2 - 1, n - 1, -1):
possible[v] |= possible[v - n]
return False
|
<commit_before><commit_msg>Add py solution for 416. Partition Equal Subset Sum
416. Partition Equal Subset Sum: https://leetcode.com/problems/partition-equal-subset-sum/<commit_after>
|
class Solution(object):
def canPartition(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
s = sum(nums)
if s % 2 != 0:
return False
possible = [True] + [False] * (s / 2 - 1)
for n in nums:
if n > s / 2:
return False
if possible[s / 2 - n]:
return True
for v in xrange(s / 2 - 1, n - 1, -1):
possible[v] |= possible[v - n]
return False
|
Add py solution for 416. Partition Equal Subset Sum
416. Partition Equal Subset Sum: https://leetcode.com/problems/partition-equal-subset-sum/class Solution(object):
def canPartition(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
s = sum(nums)
if s % 2 != 0:
return False
possible = [True] + [False] * (s / 2 - 1)
for n in nums:
if n > s / 2:
return False
if possible[s / 2 - n]:
return True
for v in xrange(s / 2 - 1, n - 1, -1):
possible[v] |= possible[v - n]
return False
|
<commit_before><commit_msg>Add py solution for 416. Partition Equal Subset Sum
416. Partition Equal Subset Sum: https://leetcode.com/problems/partition-equal-subset-sum/<commit_after>class Solution(object):
def canPartition(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
s = sum(nums)
if s % 2 != 0:
return False
possible = [True] + [False] * (s / 2 - 1)
for n in nums:
if n > s / 2:
return False
if possible[s / 2 - n]:
return True
for v in xrange(s / 2 - 1, n - 1, -1):
possible[v] |= possible[v - n]
return False
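# DP invariant (commentary): possible[v] is True iff some subset of the
# numbers processed so far sums to v (0 <= v < s/2); scanning v downward
# lets each number be used at most once. Note `s / 2` relies on Python 2
# integer division.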
|
|
ae4a5a3c1e85383304ea72762b7bcabbb46c2a0d
|
mcdowell/src/main/python/ch1/ch1.py
|
mcdowell/src/main/python/ch1/ch1.py
|
def unique(string):
counter = {}
for c in string:
if c in counter:
counter[c] += 1
else:
counter[c] = 1
print(counter)
for k in counter:
if counter[k] > 1:
return False
else:
return True
def reverse(string):
result = []
for i in range(len(string)):
result.append(string[-(i+1)])
return "".join(result)
|
Add new Python module for McDowell Chapter 1.
|
Add new Python module for McDowell Chapter 1.
|
Python
|
mit
|
jamesewoo/tigeruppercut,jamesewoo/tigeruppercut
|
Add new Python module for McDowell Chapter 1.
|
def unique(string):
counter = {}
for c in string:
if c in counter:
counter[c] += 1
else:
counter[c] = 1
print(counter)
for k in counter:
if counter[k] > 1:
return False
else:
return True
def reverse(string):
result = []
for i in range(len(string)):
result.append(string[-(i+1)])
return "".join(result)
|
<commit_before><commit_msg>Add new Python module for McDowell Chapter 1.<commit_after>
|
def unique(string):
counter = {}
for c in string:
if c in counter:
counter[c] += 1
else:
counter[c] = 1
print(counter)
for k in counter:
if counter[k] > 1:
return False
else:
return True
def reverse(string):
result = []
for i in range(len(string)):
result.append(string[-(i+1)])
return "".join(result)
|
Add new Python module for McDowell Chapter 1.def unique(string):
counter = {}
for c in string:
if c in counter:
counter[c] += 1
else:
counter[c] = 1
print(counter)
for k in counter:
if counter[k] > 1:
return False
else:
return True
def reverse(string):
result = []
for i in range(len(string)):
result.append(string[-(i+1)])
return "".join(result)
|
<commit_before><commit_msg>Add new Python module for McDowell Chapter 1.<commit_after>def unique(string):
counter = {}
for c in string:
if c in counter:
counter[c] += 1
else:
counter[c] = 1
print(counter)
for k in counter:
if counter[k] > 1:
return False
else:
return True
def reverse(string):
result = []
for i in range(len(string)):
result.append(string[-(i+1)])
return "".join(result)
|
|
9179e794ac8633ac498d2d0324e2083e8c6ed509
|
iacli/ia_search.py
|
iacli/ia_search.py
|
"""Search the Internet Archive using the Archive.org Advanced Search
API <https://archive.org/advancedsearch.php#raw>.
usage:
ia search [--help] <query>... [options...]
options:
-h, --help
-p, --parameters=<key:value>... Parameters to send with your query.
-s, --sort=<field:order>... Sort search results by specified
fields. <order> can be either "asc"
for ascending and "desc" for
descending.
-f, --field=<field>... Metadata fields to return.
"""
from docopt import docopt
import sys
import internetarchive
# main()
#_________________________________________________________________________________________
def main(argv):
args = docopt(__doc__, argv=argv)
params = dict(p.split(':') for p in args['--parameters'])
if args['--sort']:
for i, field in enumerate(args['--sort']):
key = 'sort[{0}]'.format(i)
params[key] = field.strip().replace(':', ' ')
fields = ['identifier'] + args['--field']
query = ' '.join(args['<query>'])
search = internetarchive.Search(query, fields=fields, params=params)
for result in search.results:
output = '\t'.join([result[f] for f in fields]).encode('utf-8')
sys.stdout.write(output + '\n')
sys.exit(0)
|
"""Search the Internet Archive using the Archive.org Advanced Search
API <https://archive.org/advancedsearch.php#raw>.
usage:
ia search [--help] <query>... [options...]
options:
-h, --help
-p, --parameters=<key:value>... Parameters to send with your query.
-s, --sort=<field:order>... Sort search results by specified
fields. <order> can be either "asc"
for ascending and "desc" for
descending.
-f, --field=<field>... Metadata fields to return.
"""
from docopt import docopt
import sys
from internetarchive import search
# main()
#_________________________________________________________________________________________
def main(argv):
args = docopt(__doc__, argv=argv)
params = dict(p.split(':') for p in args['--parameters'])
if args['--sort']:
for i, field in enumerate(args['--sort']):
key = 'sort[{0}]'.format(i)
params[key] = field.strip().replace(':', ' ')
fields = ['identifier'] + args['--field']
query = ' '.join(args['<query>'])
search_resp = search(query, fields=fields, params=params)
for result in search_resp.results():
output = '\t'.join([result[f] for f in fields]).encode('utf-8')
sys.stdout.write(output + '\n')
sys.exit(0)
|
Use `internetarchive.search()` function rather than `internetarchive.Search` class.
|
Use `internetarchive.search()` function rather than `internetarchive.Search` class.
|
Python
|
agpl-3.0
|
JesseWeinstein/internetarchive,dattasaurabh82/internetarchive,jjjake/internetarchive,wumpus/internetarchive,brycedrennan/internetarchive
|
"""Search the Internet Archive using the Archive.org Advanced Search
API <https://archive.org/advancedsearch.php#raw>.
usage:
ia search [--help] <query>... [options...]
options:
-h, --help
-p, --parameters=<key:value>... Parameters to send with your query.
-s, --sort=<field:order>... Sort search results by specified
fields. <order> can be either "asc"
for ascending and "desc" for
descending.
-f, --field=<field>... Metadata fields to return.
"""
from docopt import docopt
import sys
import internetarchive
# main()
#_________________________________________________________________________________________
def main(argv):
args = docopt(__doc__, argv=argv)
params = dict(p.split(':') for p in args['--parameters'])
if args['--sort']:
for i, field in enumerate(args['--sort']):
key = 'sort[{0}]'.format(i)
params[key] = field.strip().replace(':', ' ')
fields = ['identifier'] + args['--field']
query = ' '.join(args['<query>'])
search = internetarchive.Search(query, fields=fields, params=params)
for result in search.results:
output = '\t'.join([result[f] for f in fields]).encode('utf-8')
sys.stdout.write(output + '\n')
sys.exit(0)
Use `internetarchive.search()` function rather than `internetarchive.Search` class.
|
"""Search the Internet Archive using the Archive.org Advanced Search
API <https://archive.org/advancedsearch.php#raw>.
usage:
ia search [--help] <query>... [options...]
options:
-h, --help
-p, --parameters=<key:value>... Parameters to send with your query.
-s, --sort=<field:order>... Sort search results by specified
fields. <order> can be either "asc"
for ascending and "desc" for
descending.
-f, --field=<field>... Metadata fields to return.
"""
from docopt import docopt
import sys
from internetarchive import search
# main()
#_________________________________________________________________________________________
def main(argv):
args = docopt(__doc__, argv=argv)
params = dict(p.split(':') for p in args['--parameters'])
if args['--sort']:
for i, field in enumerate(args['--sort']):
key = 'sort[{0}]'.format(i)
params[key] = field.strip().replace(':', ' ')
fields = ['identifier'] + args['--field']
query = ' '.join(args['<query>'])
search_resp = search(query, fields=fields, params=params)
for result in search_resp.results():
output = '\t'.join([result[f] for f in fields]).encode('utf-8')
sys.stdout.write(output + '\n')
sys.exit(0)
|
<commit_before>"""Search the Internet Archive using the Archive.org Advanced Search
API <https://archive.org/advancedsearch.php#raw>.
usage:
ia search [--help] <query>... [options...]
options:
-h, --help
-p, --parameters=<key:value>... Parameters to send with your query.
-s, --sort=<field:order>... Sort search results by specified
fields. <order> can be either "asc"
for ascending and "desc" for
descending.
-f, --field=<field>... Metadata fields to return.
"""
from docopt import docopt
import sys
import internetarchive
# main()
#_________________________________________________________________________________________
def main(argv):
args = docopt(__doc__, argv=argv)
params = dict(p.split(':') for p in args['--parameters'])
if args['--sort']:
for i, field in enumerate(args['--sort']):
key = 'sort[{0}]'.format(i)
params[key] = field.strip().replace(':', ' ')
fields = ['identifier'] + args['--field']
query = ' '.join(args['<query>'])
search = internetarchive.Search(query, fields=fields, params=params)
for result in search.results:
output = '\t'.join([result[f] for f in fields]).encode('utf-8')
sys.stdout.write(output + '\n')
sys.exit(0)
<commit_msg>Use `internetarchive.search()` function rather than `internetarchive.Search` class.<commit_after>
|
"""Search the Internet Archive using the Archive.org Advanced Search
API <https://archive.org/advancedsearch.php#raw>.
usage:
ia search [--help] <query>... [options...]
options:
-h, --help
-p, --parameters=<key:value>... Parameters to send with your query.
-s, --sort=<field:order>... Sort search results by specified
fields. <order> can be either "asc"
for ascending and "desc" for
descending.
-f, --field=<field>... Metadata fields to return.
"""
from docopt import docopt
import sys
from internetarchive import search
# main()
#_________________________________________________________________________________________
def main(argv):
args = docopt(__doc__, argv=argv)
params = dict(p.split(':') for p in args['--parameters'])
if args['--sort']:
for i, field in enumerate(args['--sort']):
key = 'sort[{0}]'.format(i)
params[key] = field.strip().replace(':', ' ')
fields = ['identifier'] + args['--field']
query = ' '.join(args['<query>'])
search_resp = search(query, fields=fields, params=params)
for result in search_resp.results():
output = '\t'.join([result[f] for f in fields]).encode('utf-8')
sys.stdout.write(output + '\n')
sys.exit(0)
|
"""Search the Internet Archive using the Archive.org Advanced Search
API <https://archive.org/advancedsearch.php#raw>.
usage:
ia search [--help] <query>... [options...]
options:
-h, --help
-p, --parameters=<key:value>... Parameters to send with your query.
-s, --sort=<field:order>... Sort search results by specified
fields. <order> can be either "asc"
for ascending and "desc" for
descending.
-f, --field=<field>... Metadata fields to return.
"""
from docopt import docopt
import sys
import internetarchive
# main()
#_________________________________________________________________________________________
def main(argv):
args = docopt(__doc__, argv=argv)
params = dict(p.split(':') for p in args['--parameters'])
if args['--sort']:
for i, field in enumerate(args['--sort']):
key = 'sort[{0}]'.format(i)
params[key] = field.strip().replace(':', ' ')
fields = ['identifier'] + args['--field']
query = ' '.join(args['<query>'])
search = internetarchive.Search(query, fields=fields, params=params)
for result in search.results:
output = '\t'.join([result[f] for f in fields]).encode('utf-8')
sys.stdout.write(output + '\n')
sys.exit(0)
Use `internetarchive.search()` function rather than `internetarchive.Search` class."""Search the Internet Archive using the Archive.org Advanced Search
API <https://archive.org/advancedsearch.php#raw>.
usage:
ia search [--help] <query>... [options...]
options:
-h, --help
-p, --parameters=<key:value>... Parameters to send with your query.
-s, --sort=<field:order>... Sort search results by specified
fields. <order> can be either "asc"
for ascending and "desc" for
descending.
-f, --field=<field>... Metadata fields to return.
"""
from docopt import docopt
import sys
from internetarchive import search
# main()
#_________________________________________________________________________________________
def main(argv):
args = docopt(__doc__, argv=argv)
params = dict(p.split(':') for p in args['--parameters'])
if args['--sort']:
for i, field in enumerate(args['--sort']):
key = 'sort[{0}]'.format(i)
params[key] = field.strip().replace(':', ' ')
fields = ['identifier'] + args['--field']
query = ' '.join(args['<query>'])
search_resp = search(query, fields=fields, params=params)
for result in search_resp.results():
output = '\t'.join([result[f] for f in fields]).encode('utf-8')
sys.stdout.write(output + '\n')
sys.exit(0)
|
<commit_before>"""Search the Internet Archive using the Archive.org Advanced Search
API <https://archive.org/advancedsearch.php#raw>.
usage:
ia search [--help] <query>... [options...]
options:
-h, --help
-p, --parameters=<key:value>... Parameters to send with your query.
-s, --sort=<field:order>... Sort search results by specified
fields. <order> can be either "asc"
for ascending and "desc" for
descending.
-f, --field=<field>... Metadata fields to return.
"""
from docopt import docopt
import sys
import internetarchive
# main()
#_________________________________________________________________________________________
def main(argv):
args = docopt(__doc__, argv=argv)
params = dict(p.split(':') for p in args['--parameters'])
if args['--sort']:
for i, field in enumerate(args['--sort']):
key = 'sort[{0}]'.format(i)
params[key] = field.strip().replace(':', ' ')
fields = ['identifier'] + args['--field']
query = ' '.join(args['<query>'])
search = internetarchive.Search(query, fields=fields, params=params)
for result in search.results:
output = '\t'.join([result[f] for f in fields]).encode('utf-8')
sys.stdout.write(output + '\n')
sys.exit(0)
<commit_msg>Use `internetarchive.search()` function rather than `internetarchive.Search` class.<commit_after>"""Search the Internet Archive using the Archive.org Advanced Search
API <https://archive.org/advancedsearch.php#raw>.
usage:
ia search [--help] <query>... [options...]
options:
-h, --help
-p, --parameters=<key:value>... Parameters to send with your query.
-s, --sort=<field:order>... Sort search results by specified
fields. <order> can be either "asc"
for ascending and "desc" for
descending.
-f, --field=<field>... Metadata fields to return.
"""
from docopt import docopt
import sys
from internetarchive import search
# main()
#_________________________________________________________________________________________
def main(argv):
args = docopt(__doc__, argv=argv)
params = dict(p.split(':') for p in args['--parameters'])
if args['--sort']:
for i, field in enumerate(args['--sort']):
key = 'sort[{0}]'.format(i)
params[key] = field.strip().replace(':', ' ')
fields = ['identifier'] + args['--field']
query = ' '.join(args['<query>'])
search_resp = search(query, fields=fields, params=params)
for result in search_resp.results():
output = '\t'.join([result[f] for f in fields]).encode('utf-8')
sys.stdout.write(output + '\n')
sys.exit(0)
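# Example invocation (a sketch; query and fields are illustrative):
#   ia search 'collection:nasa' --field title --sort date:asc
# Output is one tab-separated line per result: identifier plus each --field.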
|
0cae12bdbde6fa87f65025bc14ee5863f2526c4b
|
Snippets/edit_raw_table_data.py
|
Snippets/edit_raw_table_data.py
|
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables.DefaultTable import DefaultTable
font_path = "myfont.ttf"
output_path = "myfont_patched.ttf"
table_tag = "DSIG"
# Get raw table data from the source font
font = TTFont(font_path)
raw_data = font.getTableData(table_tag)
# Do something with the raw table data
# This example just sets an empty DSIG table.
raw_data = "\0\0\0\1\0\0\0\0"
# Write the data back to the font
# We could re-use the existing table when the source and target font are
# identical, but let's make a new empty table to be more universal.
table = DefaultTable(table_tag)
table.data = raw_data
# Add the new table back into the source font and save under a new name.
font[table_tag] = table
font.save(output_path)
|
Add snippet manipulating raw table data
|
Add snippet manipulating raw table data
|
Python
|
mit
|
fonttools/fonttools,googlefonts/fonttools
|
Add snippet manipulating raw table data
|
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables.DefaultTable import DefaultTable
font_path = "myfont.ttf"
output_path = "myfont_patched.ttf"
table_tag = "DSIG"
# Get raw table data from the source font
font = TTFont(font_path)
raw_data = font.getTableData(table_tag)
# Do something with the raw table data
# This example just sets an empty DSIG table.
raw_data = "\0\0\0\1\0\0\0\0"
# Write the data back to the font
# We could re-use the existing table when the source and target font are
# identical, but let's make a new empty table to be more universal.
table = DefaultTable(table_tag)
table.data = raw_data
# Add the new table back into the source font and save under a new name.
font[table_tag] = table
font.save(output_path)
|
<commit_before><commit_msg>Add snippet manipulating raw table data<commit_after>
|
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables.DefaultTable import DefaultTable
font_path = "myfont.ttf"
output_path = "myfont_patched.ttf"
table_tag = "DSIG"
# Get raw table data from the source font
font = TTFont(font_path)
raw_data = font.getTableData(table_tag)
# Do something with the raw table data
# This example just sets an empty DSIG table.
raw_data = "\0\0\0\1\0\0\0\0"
# Write the data back to the font
# We could re-use the existing table when the source and target font are
# identical, but let's make a new empty table to be more universal.
table = DefaultTable(table_tag)
table.data = raw_data
# Add the new table back into the source font and save under a new name.
font[table_tag] = table
font.save(output_path)
|
Add snippet manipulating raw table datafrom fontTools.ttLib import TTFont
from fontTools.ttLib.tables.DefaultTable import DefaultTable
font_path = "myfont.ttf"
output_path = "myfont_patched.ttf"
table_tag = "DSIG"
# Get raw table data from the source font
font = TTFont(font_path)
raw_data = font.getTableData(table_tag)
# Do something with the raw table data
# This example just sets an empty DSIG table.
raw_data = "\0\0\0\1\0\0\0\0"
# Write the data back to the font
# We could re-use the existing table when the source and target font are
# identical, but let's make a new empty table to be more universal.
table = DefaultTable(table_tag)
table.data = raw_data
# Add the new table back into the source font and save under a new name.
font[table_tag] = table
font.save(output_path)
|
<commit_before><commit_msg>Add snippet manipulating raw table data<commit_after>from fontTools.ttLib import TTFont
from fontTools.ttLib.tables.DefaultTable import DefaultTable
font_path = "myfont.ttf"
output_path = "myfont_patched.ttf"
table_tag = "DSIG"
# Get raw table data from the source font
font = TTFont(font_path)
raw_data = font.getTableData(table_tag)
# Do something with the raw table data
# This example just sets an empty DSIG table.
raw_data = "\0\0\0\1\0\0\0\0"
# Write the data back to the font
# We could re-use the existing table when the source and target font are
# identical, but let's make a new empty table to be more universal.
table = DefaultTable(table_tag)
table.data = raw_data
# Add the new table back into the source font and save under a new name.
font[table_tag] = table
font.save(output_path)
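# Aside (commentary): the eight bytes "\0\0\0\1\0\0\0\0" form a minimal
# DSIG table -- version 1, zero signature records, zero flags. Under
# Python 3 this would need to be a bytes literal rather than a str.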
|
|
f342ce1ee57b0fdc6847d80a7453f575e4301299
|
py/partition-to-k-equal-sum-subsets.py
|
py/partition-to-k-equal-sum-subsets.py
|
class Solution(object):
def canPartitionKSubsets(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
s = sum(nums)
if s % k != 0:
return False
target = s / k
lnums = len(nums)
fail = set()
nums.sort(reverse=True)
def dfs(groups, cur, flag):
if groups == k - 1:
return True
for i in xrange(lnums):
n = nums[i]
if (1 << i) & flag == 0 and flag | (1 << i) not in fail:
if cur + n <= target:
if dfs(groups + (cur + n) / target, (cur + n) % target, flag | (1 << i)):
return True
fail.add(flag)
return False
return dfs(0, 0, 0)
|
Add py solution for 698. Partition to K Equal Sum Subsets
|
Add py solution for 698. Partition to K Equal Sum Subsets
698. Partition to K Equal Sum Subsets: https://leetcode.com/problems/partition-to-k-equal-sum-subsets/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 698. Partition to K Equal Sum Subsets
698. Partition to K Equal Sum Subsets: https://leetcode.com/problems/partition-to-k-equal-sum-subsets/
|
class Solution(object):
def canPartitionKSubsets(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
s = sum(nums)
if s % k != 0:
return False
target = s / k
lnums = len(nums)
fail = set()
nums.sort(reverse=True)
def dfs(groups, cur, flag):
if groups == k - 1:
return True
for i in xrange(lnums):
n = nums[i]
if (1 << i) & flag == 0 and flag | (1 << i) not in fail:
if cur + n <= target:
if dfs(groups + (cur + n) / target, (cur + n) % target, flag | (1 << i)):
return True
fail.add(flag)
return False
return dfs(0, 0, 0)
|
<commit_before><commit_msg>Add py solution for 698. Partition to K Equal Sum Subsets
698. Partition to K Equal Sum Subsets: https://leetcode.com/problems/partition-to-k-equal-sum-subsets/<commit_after>
|
class Solution(object):
def canPartitionKSubsets(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
s = sum(nums)
if s % k != 0:
return False
target = s / k
lnums = len(nums)
fail = set()
nums.sort(reverse=True)
def dfs(groups, cur, flag):
if groups == k - 1:
return True
for i in xrange(lnums):
n = nums[i]
if (1 << i) & flag == 0 and flag | (1 << i) not in fail:
if cur + n <= target:
if dfs(groups + (cur + n) / target, (cur + n) % target, flag | (1 << i)):
return True
fail.add(flag)
return False
return dfs(0, 0, 0)
|
Add py solution for 698. Partition to K Equal Sum Subsets
698. Partition to K Equal Sum Subsets: https://leetcode.com/problems/partition-to-k-equal-sum-subsets/class Solution(object):
def canPartitionKSubsets(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
s = sum(nums)
if s % k != 0:
return False
target = s / k
lnums = len(nums)
fail = set()
nums.sort(reverse=True)
def dfs(groups, cur, flag):
if groups == k - 1:
return True
for i in xrange(lnums):
n = nums[i]
if (1 << i) & flag == 0 and flag | (1 << i) not in fail:
if cur + n <= target:
if dfs(groups + (cur + n) / target, (cur + n) % target, flag | (1 << i)):
return True
fail.add(flag)
return False
return dfs(0, 0, 0)
|
<commit_before><commit_msg>Add py solution for 698. Partition to K Equal Sum Subsets
698. Partition to K Equal Sum Subsets: https://leetcode.com/problems/partition-to-k-equal-sum-subsets/<commit_after>class Solution(object):
def canPartitionKSubsets(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
s = sum(nums)
if s % k != 0:
return False
target = s / k
lnums = len(nums)
fail = set()
nums.sort(reverse=True)
def dfs(groups, cur, flag):
if groups == k - 1:
return True
for i in xrange(lnums):
n = nums[i]
if (1 << i) & flag == 0 and flag | (1 << i) not in fail:
if cur + n <= target:
if dfs(groups + (cur + n) / target, (cur + n) % target, flag | (1 << i)):
return True
fail.add(flag)
return False
return dfs(0, 0, 0)
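# Search-state sketch (commentary): `flag` is a bitmask of indices already
# placed, `cur` is the running sum of the group being filled, and `fail`
# memoizes bitmasks proven unfinishable so repeated subtrees are pruned.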
|
|
0a6eaaee9acdfee7154c9b10f85636e2a776c296
|
blueprints/aws_s3_bucket/discover_s3_buckets.py
|
blueprints/aws_s3_bucket/discover_s3_buckets.py
|
"""
Discover and create S3 Bucket records with some basic identifying attributes.
As all Discovery Plug-ins must do, we define the global `RESOURCE_IDENTIFIER` variable
and return a list of dictionaries from the `discover_resources` function.
"""
import boto3
from botocore.client import ClientError
from common.methods import set_progress
from resourcehandlers.aws.models import AWSHandler
RESOURCE_IDENTIFIER = 's3_bucket_name'
def discover_resources(**kwargs):
discovered_buckets = []
for handler in AWSHandler.objects.all():
set_progress('Connecting to Amazon S3 for handler: {}'.format(handler))
conn = boto3.resource(
's3',
aws_access_key_id=handler.serviceaccount,
aws_secret_access_key=handler.servicepasswd,
)
try:
for bucket in conn.buckets.all():
discovered_buckets.append({
"s3_bucket_name": bucket.name,
"aws_rh_id": handler.id,
"created_in_s3": str(bucket.creation_date)
})
except ClientError as e:
set_progress('AWS ClientError: {}'.format(e))
continue
return discovered_buckets
|
Add Discovery Plug-in for AWS S3 Bucket Blueprint
|
Add Discovery Plug-in for AWS S3 Bucket Blueprint
Provide a Plug-in that Discovers and Syncs any existing S3 Buckets for all configured AWS Resource Handlers. This Plug-in creates the S3 Bucket Resources in CloudBolt with the necessary attributes to support the Teardown Plug-in already provided for this Blueprint.
|
Python
|
apache-2.0
|
CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge
|
Add Discovery Plug-in for AWS S3 Bucket Blueprint
Provide a Plug-in that Discovers and Syncs any existing S3 Buckets for all configured AWS Resource Handlers. This Plug-in creates the S3 Bucket Resources in CloudBolt with the necessary attributes to support the Teardown Plug-in already provided for this Blueprint.
|
"""
Discover and create S3 Bucket records with some basic identifying attributes.
As all Discovery Plug-ins must do, we define the global `RESOURCE_IDENTIFIER` variable
and return a list of dictionaries from the `discover_resources` function.
"""
import boto3
from botocore.client import ClientError
from common.methods import set_progress
from resourcehandlers.aws.models import AWSHandler
RESOURCE_IDENTIFIER = 's3_bucket_name'
def discover_resources(**kwargs):
discovered_buckets = []
for handler in AWSHandler.objects.all():
set_progress('Connecting to Amazon S3 for handler: {}'.format(handler))
conn = boto3.resource(
's3',
aws_access_key_id=handler.serviceaccount,
aws_secret_access_key=handler.servicepasswd,
)
try:
for bucket in conn.buckets.all():
discovered_buckets.append({
"s3_bucket_name": bucket.name,
"aws_rh_id": handler.id,
"created_in_s3": str(bucket.creation_date)
})
except ClientError as e:
set_progress('AWS ClientError: {}'.format(e))
continue
return discovered_buckets
|
<commit_before><commit_msg>Add Discovery Plug-in for AWS S3 Bucket Blueprint
Provide a Plug-in that Discovers and Syncs any existing S3 Buckets for all configured AWS Resource Handlers. This Plug-in creates the S3 Bucket Resources in CloudBolt with the necessary attributes to support the Teardown Plug-in already provided for this Blueprint.<commit_after>
|
"""
Discover and create S3 Bucket records with some basic identifying attributes.
As all Discovery Plug-ins must do, we define the global `RESOURCE_IDENTIFIER` variable
and return a list of dictionaries from the `discover_resources` function.
"""
import boto3
from botocore.client import ClientError
from common.methods import set_progress
from resourcehandlers.aws.models import AWSHandler
RESOURCE_IDENTIFIER = 's3_bucket_name'
def discover_resources(**kwargs):
discovered_buckets = []
for handler in AWSHandler.objects.all():
set_progress('Connecting to Amazon S3 for handler: {}'.format(handler))
conn = boto3.resource(
's3',
aws_access_key_id=handler.serviceaccount,
aws_secret_access_key=handler.servicepasswd,
)
try:
for bucket in conn.buckets.all():
discovered_buckets.append({
"s3_bucket_name": bucket.name,
"aws_rh_id": handler.id,
"created_in_s3": str(bucket.creation_date)
})
except ClientError as e:
set_progress('AWS ClientError: {}'.format(e))
continue
return discovered_buckets
|
Add Discovery Plug-in for AWS S3 Bucket Blueprint
Provide a Plug-in that Discovers and Syncs any existing S3 Buckets for all configured AWS Resource Handlers. This Plug-in creates the S3 Bucket Resources in CloudBolt with the necessary attributes to support the Teardown Plug-in already provided for this Blueprint."""
Discover and create S3 Bucket records with some basic identifying attributes.
As all Discovery Plug-ins must do, we define the global `RESOURCE_IDENTIFIER` variable
and return a list of dictionaries from the `discover_resources` function.
"""
import boto3
from botocore.client import ClientError
from common.methods import set_progress
from resourcehandlers.aws.models import AWSHandler
RESOURCE_IDENTIFIER = 's3_bucket_name'
def discover_resources(**kwargs):
discovered_buckets = []
for handler in AWSHandler.objects.all():
set_progress('Connecting to Amazon S3 for handler: {}'.format(handler))
conn = boto3.resource(
's3',
aws_access_key_id=handler.serviceaccount,
aws_secret_access_key=handler.servicepasswd,
)
try:
for bucket in conn.buckets.all():
discovered_buckets.append({
"s3_bucket_name": bucket.name,
"aws_rh_id": handler.id,
"created_in_s3": str(bucket.creation_date)
})
except ClientError as e:
set_progress('AWS ClientError: {}'.format(e))
continue
return discovered_buckets
|
<commit_before><commit_msg>Add Discovery Plug-in for AWS S3 Bucket Blueprint
Provide a Plug-in that Discovers and Syncs any existing S3 Buckets for all configured AWS Resource Handlers. This Plug-in creates the S3 Bucket Resources in CloudBolt with the necessary attributes to support the Teardown Plug-in already provided for this Blueprint.<commit_after>"""
Discover and create S3 Bucket records with some basic identifying attributes.
As all Discovery Plug-ins must do, we define the global `RESOURCE_IDENTIFIER` variable
and return a list of dictionaries from the `discover_resources` function.
"""
import boto3
from botocore.client import ClientError
from common.methods import set_progress
from resourcehandlers.aws.models import AWSHandler
RESOURCE_IDENTIFIER = 's3_bucket_name'
def discover_resources(**kwargs):
discovered_buckets = []
for handler in AWSHandler.objects.all():
set_progress('Connecting to Amazon S3 for handler: {}'.format(handler))
conn = boto3.resource(
's3',
aws_access_key_id=handler.serviceaccount,
aws_secret_access_key=handler.servicepasswd,
)
try:
for bucket in conn.buckets.all():
discovered_buckets.append({
"s3_bucket_name": bucket.name,
"aws_rh_id": handler.id,
"created_in_s3": str(bucket.creation_date)
})
except ClientError as e:
set_progress('AWS ClientError: {}'.format(e))
continue
return discovered_buckets
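# Shape of the returned records (illustrative; values are made up):
#   [{"s3_bucket_name": "my-bucket", "aws_rh_id": 3,
#     "created_in_s3": "2019-01-01 00:00:00+00:00"}]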
|
|
7bd32a767490c534685d1b421dcbf7e62deddc06
|
send_email.py
|
send_email.py
|
import sendgrid
import os
from sendgrid.helpers.mail import *
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
from_email = Email("SpyPi@no-reply")
subject = "Motion Detected"
to_email = Email("sendToUser@example.com")
content = Content("text/plain", "We have detected motion from your pi!\n\n")
mail = Mail(from_email, subject, to_email, content)
response = sg.client.mail.send.post(request_body=mail.get())
print(response.status_code)
print(response.body)
print(response.headers)
|
Send email using sendgrid API
|
Send email using sendgrid API
|
Python
|
mit
|
efagerberg/PiCam
|
Send email using sendgrid API
|
import sendgrid
import os
from sendgrid.helpers.mail import *
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
from_email = Email("SpyPi@no-reply")
subject = "Motion Detected"
to_email = Email("sendToUser@example.com")
content = Content("text/plain", "We have detected motion from your pi!\n\n")
mail = Mail(from_email, subject, to_email, content)
response = sg.client.mail.send.post(request_body=mail.get())
print(response.status_code)
print(response.body)
print(response.headers)
|
<commit_before><commit_msg>Send email using sendgrid API<commit_after>
|
import sendgrid
import os
from sendgrid.helpers.mail import *
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
from_email = Email("SpyPi@no-reply")
subject = "Motion Detected"
to_email = Email("sendToUser@example.com")
content = Content("text/plain", "We have detected motion from your pi!\n\n")
mail = Mail(from_email, subject, to_email, content)
response = sg.client.mail.send.post(request_body=mail.get())
print(response.status_code)
print(response.body)
print(response.headers)
|
Send email using sendgrid APIimport sendgrid
import os
from sendgrid.helpers.mail import *
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
from_email = Email("SpyPi@no-reply")
subject = "Motion Detected"
to_email = Email("sendToUser@example.com")
content = Content("text/plain", "We have detected motion from your pi!\n\n")
mail = Mail(from_email, subject, to_email, content)
response = sg.client.mail.send.post(request_body=mail.get())
print(response.status_code)
print(response.body)
print(response.headers)
|
<commit_before><commit_msg>Send email using sendgrid API<commit_after>import sendgrid
import os
from sendgrid.helpers.mail import *
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
from_email = Email("SpyPi@no-reply")
subject = "Motion Detected"
to_email = Email("sendToUser@example.com")
content = Content("text/plain", "We have detected motion from your pi!\n\n")
mail = Mail(from_email, subject, to_email, content)
response = sg.client.mail.send.post(request_body=mail.get())
print(response.status_code)
print(response.body)
print(response.headers)
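# Setup note (a sketch, not from the original commit): export a valid key
# before running, e.g.
#   export SENDGRID_API_KEY='SG.xxxxxxxx'   # placeholder value
#   python send_email.py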
|
|
51be58367217bbfc3e677afc79a73edb26ec6665
|
problem_37.py
|
problem_37.py
|
from problem_35 import is_prime
from time import time
TRUNCATABLE_PRIMES = []
def is_truncatable_prime(num):
for i in ['0', '4', '6', '8']:
if i in str(num):
return False
for i in ['2', '5']:
if i in str(num)[1:]:
return False
if not is_prime(num):
return False
for idx in range(len(str(num)) - 1):
if not is_prime(int(str(num)[idx+1:])):
return False
for idx in range(len(str(num)) - 1):
if not is_prime(int(str(num)[0:-1*idx-1])):
return False
return True
def main():
counter = 11
while len(TRUNCATABLE_PRIMES) < 11:
counter += 2
if is_truncatable_prime(counter):
TRUNCATABLE_PRIMES.append(counter)
print TRUNCATABLE_PRIMES
print 'Sum:', sum(TRUNCATABLE_PRIMES)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 37, truncatable primes
|
Add problem 37, truncatable primes
|
Python
|
mit
|
dimkarakostas/project-euler
|
Add problem 37, truncatable primes
|
from problem_35 import is_prime
from time import time
TRUNCATABLE_PRIMES = []
def is_truncatable_prime(num):
for i in ['0', '4', '6', '8']:
if i in str(num):
return False
for i in ['2', '5']:
if i in str(num)[1:]:
return False
if not is_prime(num):
return False
for idx in range(len(str(num)) - 1):
if not is_prime(int(str(num)[idx+1:])):
return False
for idx in range(len(str(num)) - 1):
if not is_prime(int(str(num)[0:-1*idx-1])):
return False
return True
def main():
counter = 11
while len(TRUNCATABLE_PRIMES) < 11:
counter += 2
if is_truncatable_prime(counter):
TRUNCATABLE_PRIMES.append(counter)
print TRUNCATABLE_PRIMES
print 'Sum:', sum(TRUNCATABLE_PRIMES)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 37, truncatable primes<commit_after>
|
from problem_35 import is_prime
from time import time
TRUNCATABLE_PRIMES = []
def is_truncatable_prime(num):
for i in ['0', '4', '6', '8']:
if i in str(num):
return False
for i in ['2', '5']:
if i in str(num)[1:]:
return False
if not is_prime(num):
return False
for idx in range(len(str(num)) - 1):
if not is_prime(int(str(num)[idx+1:])):
return False
for idx in range(len(str(num)) - 1):
if not is_prime(int(str(num)[0:-1*idx-1])):
return False
return True
def main():
counter = 11
while len(TRUNCATABLE_PRIMES) < 11:
counter += 2
if is_truncatable_prime(counter):
TRUNCATABLE_PRIMES.append(counter)
print TRUNCATABLE_PRIMES
print 'Sum:', sum(TRUNCATABLE_PRIMES)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 37, truncatable primes

from problem_35 import is_prime
from time import time
TRUNCATABLE_PRIMES = []
def is_truncatable_prime(num):
for i in ['0', '4', '6', '8']:
if i in str(num):
return False
for i in ['2', '5']:
if i in str(num)[1:]:
return False
if not is_prime(num):
return False
for idx in range(len(str(num)) - 1):
if not is_prime(int(str(num)[idx+1:])):
return False
for idx in range(len(str(num)) - 1):
if not is_prime(int(str(num)[0:-1*idx-1])):
return False
return True
def main():
counter = 11
while len(TRUNCATABLE_PRIMES) < 11:
counter += 2
if is_truncatable_prime(counter):
TRUNCATABLE_PRIMES.append(counter)
print TRUNCATABLE_PRIMES
print 'Sum:', sum(TRUNCATABLE_PRIMES)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 37, truncatable primes<commit_after>from problem_35 import is_prime
from time import time
TRUNCATABLE_PRIMES = []
def is_truncatable_prime(num):
for i in ['0', '4', '6', '8']:
if i in str(num):
return False
for i in ['2', '5']:
if i in str(num)[1:]:
return False
if not is_prime(num):
return False
for idx in range(len(str(num)) - 1):
if not is_prime(int(str(num)[idx+1:])):
return False
for idx in range(len(str(num)) - 1):
if not is_prime(int(str(num)[0:-1*idx-1])):
return False
return True
def main():
counter = 11
while len(TRUNCATABLE_PRIMES) < 11:
counter += 2
if is_truncatable_prime(counter):
TRUNCATABLE_PRIMES.append(counter)
print TRUNCATABLE_PRIMES
print 'Sum:', sum(TRUNCATABLE_PRIMES)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
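
The is_prime helper is imported from problem_35, which is not shown in this record; a plausible trial-division version, consistent with how it is used above, would be:

def is_prime(n):
    # Trial division up to sqrt(n); sufficient for the small
    # candidates this search touches.
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True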
|
|
0d7a51312d35b99c4eb06a349a9050cf5bb72ce1
|
genotp.py
|
genotp.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# $Date$
#
# To the extent possible under law, Roland Smith has waived all copyright and
# related or neighboring rights to genotp.py. This work is published from the
# Netherlands. See http://creativecommons.org/publicdomain/zero/1.0/
"""Generates a one-time pad."""
from __future__ import division, print_function
from os import urandom
def rndcaps(n):
"""Generates a string of n random capital letters."""
b = urandom(n)
return ''.join([chr(int(round(ord(c)/10.2))+65) for c in b])
if __name__ == '__main__':
for num in range(1, 67):
ls = ['{:02d} '.format(num)]
cb = [rndcaps(5) for j in range(0, 12)]
print(' '.join(ls+cb))
|
Simplify the generation of a one-time pad.
|
Simplify the generation of a one-time pad.
Renamed genoldpad.py to genotp.py. Removed the genline function; moved its
functionality to the main block.
|
Python
|
mit
|
rsmith-nl/scripts,rsmith-nl/scripts
|
Simplify the generation of a one-time pad.
Renamed genoldpad.py to genotp.py. Removed the genline function; moved its
functionality to the main block.
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# $Date$
#
# To the extent possible under law, Roland Smith has waived all copyright and
# related or neighboring rights to genotp.py. This work is published from the
# Netherlands. See http://creativecommons.org/publicdomain/zero/1.0/
"""Generates a one-time pad."""
from __future__ import division, print_function
from os import urandom
def rndcaps(n):
"""Generates a string of n random capital letters."""
b = urandom(n)
return ''.join([chr(int(round(ord(c)/10.2))+65) for c in b])
if __name__ == '__main__':
for num in range(1, 67):
ls = ['{:02d} '.format(num)]
cb = [rndcaps(5) for j in range(0, 12)]
print(' '.join(ls+cb))
|
<commit_before><commit_msg>Simplify the generation of a one-time pad.
Renamed genoldpad.py to genotp.py. Removed the genline function; moved its
functionality to the main block.<commit_after>
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# $Date$
#
# To the extent possible under law, Roland Smith has waived all copyright and
# related or neighboring rights to genotp.py. This work is published from the
# Netherlands. See http://creativecommons.org/publicdomain/zero/1.0/
"""Generates a one-time pad."""
from __future__ import division, print_function
from os import urandom
def rndcaps(n):
"""Generates a string of n random capital letters."""
b = urandom(n)
return ''.join([chr(int(round(ord(c)/10.2))+65) for c in b])
if __name__ == '__main__':
for num in range(1, 67):
ls = ['{:02d} '.format(num)]
cb = [rndcaps(5) for j in range(0, 12)]
print(' '.join(ls+cb))
|
Simplify the generation of a one-time pad.
Renamed genoldpad.py to genotp.py. Removed the genline function; moved its
functionality to the main block.

#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# $Date$
#
# To the extent possible under law, Roland Smith has waived all copyright and
# related or neighboring rights to genotp.py. This work is published from the
# Netherlands. See http://creativecommons.org/publicdomain/zero/1.0/
"""Generates a one-time pad."""
from __future__ import division, print_function
from os import urandom
def rndcaps(n):
"""Generates a string of n random capital letters."""
b = urandom(n)
return ''.join([chr(int(round(ord(c)/10.2))+65) for c in b])
if __name__ == '__main__':
for num in range(1, 67):
ls = ['{:02d} '.format(num)]
cb = [rndcaps(5) for j in range(0, 12)]
print(' '.join(ls+cb))
|
<commit_before><commit_msg>Simplify the generation of a one-time pad.
Renamed genoldpad.py to genotp.py. Removed the genline function; moved its
functionality to the main block.<commit_after>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# $Date$
#
# To the extent possible under law, Roland Smith has waived all copyright and
# related or neighboring rights to genotp.py. This work is published from the
# Netherlands. See http://creativecommons.org/publicdomain/zero/1.0/
"""Generates a one-time pad."""
from __future__ import division, print_function
from os import urandom
def rndcaps(n):
"""Generates a string of n random capital letters."""
b = urandom(n)
return ''.join([chr(int(round(ord(c)/10.2))+65) for c in b])
if __name__ == '__main__':
for num in range(1, 67):
ls = ['{:02d} '.format(num)]
cb = [rndcaps(5) for j in range(0, 12)]
print(' '.join(ls+cb))
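
One caveat: scaling raw byte values with ord(c)/10.2 spreads 256 possible inputs over 26 letters unevenly, so some letters come up slightly more often than others, which matters for a one-time pad. On Python 3, an unbiased sketch could use the secrets module instead:

import secrets
import string

def rndcaps(n):
    """Return n uniformly random capital letters (Python 3)."""
    return ''.join(secrets.choice(string.ascii_uppercase) for _ in range(n))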
|
|
05d3cb5b44bd828e354c699b5793ecdf6f4501cd
|
locations/tasks.py
|
locations/tasks.py
|
import urlparse
from celery.task import Task
from django.conf import settings
from seed_services_client import IdentityStoreApiClient
from .models import Parish
class SyncLocations(Task):
"""
Has a look at all the identity store identities, and ensures that all of
the locations assigned to identities appear in the list of locations.
"""
def get_identities(self, client):
"""
Returns an iterator over all the identities in the identity store
specified by 'client'.
"""
identities = client.get_identities()
while True:
for identity in identities.get('results', []):
yield identity
if identities.get('next') is not None:
qs = urlparse.urlparse(identities['next']).query
identities = client.get_identities(params=qs)
else:
break
def run(self, **kwargs):
l = self.get_logger(**kwargs)
l.info('Starting location import')
imported_count = 0
client = IdentityStoreApiClient(
settings.IDENTITY_STORE_TOKEN, settings.IDENTITY_STORE_URL)
for identity in self.get_identities(client):
parish = identity.get('details', {}).get('parish')
if parish is not None:
_, created = Parish.objects.get_or_create(name=parish.title())
if created:
imported_count += 1
l.info('Imported {} locations'.format(imported_count))
return imported_count
sync_locations = SyncLocations()
|
Add task to sync locations from identity store
|
Add task to sync locations from identity store
|
Python
|
bsd-3-clause
|
praekelt/familyconnect-registration,praekelt/familyconnect-registration
|
Add task to sync locations from identity store
|
import urlparse
from celery.task import Task
from django.conf import settings
from seed_services_client import IdentityStoreApiClient
from .models import Parish
class SyncLocations(Task):
"""
Has a look at all the identity store identities, and ensures that all of
the locations assigned to identities appear in the list of locations.
"""
def get_identities(self, client):
"""
Returns an iterator over all the identities in the identity store
specified by 'client'.
"""
identities = client.get_identities()
while True:
for identity in identities.get('results', []):
yield identity
if identities.get('next') is not None:
qs = urlparse.urlparse(identities['next']).query
identities = client.get_identities(params=qs)
else:
break
def run(self, **kwargs):
l = self.get_logger(**kwargs)
l.info('Starting location import')
imported_count = 0
client = IdentityStoreApiClient(
settings.IDENTITY_STORE_TOKEN, settings.IDENTITY_STORE_URL)
for identity in self.get_identities(client):
parish = identity.get('details', {}).get('parish')
if parish is not None:
_, created = Parish.objects.get_or_create(name=parish.title())
if created:
imported_count += 1
l.info('Imported {} locations'.format(imported_count))
return imported_count
sync_locations = SyncLocations()
|
<commit_before><commit_msg>Add task to sync locations from identity store<commit_after>
|
import urlparse
from celery.task import Task
from django.conf import settings
from seed_services_client import IdentityStoreApiClient
from .models import Parish
class SyncLocations(Task):
"""
Has a look at all the identity store identities, and ensures that all of
the locations assigned to identities appear in the list of locations.
"""
def get_identities(self, client):
"""
Returns an iterator over all the identities in the identity store
specified by 'client'.
"""
identities = client.get_identities()
while True:
for identity in identities.get('results', []):
yield identity
if identities.get('next') is not None:
qs = urlparse.urlparse(identities['next']).query
identities = client.get_identities(params=qs)
else:
break
def run(self, **kwargs):
l = self.get_logger(**kwargs)
l.info('Starting location import')
imported_count = 0
client = IdentityStoreApiClient(
settings.IDENTITY_STORE_TOKEN, settings.IDENTITY_STORE_URL)
for identity in self.get_identities(client):
parish = identity.get('details', {}).get('parish')
if parish is not None:
_, created = Parish.objects.get_or_create(name=parish.title())
if created:
imported_count += 1
l.info('Imported {} locations'.format(imported_count))
return imported_count
sync_locations = SyncLocations()
|
Add task to sync locations from identity store

import urlparse
from celery.task import Task
from django.conf import settings
from seed_services_client import IdentityStoreApiClient
from .models import Parish
class SyncLocations(Task):
"""
Has a look at all the identity store identities, and ensures that all of
the locations assigned to identities appear in the list of locations.
"""
def get_identities(self, client):
"""
Returns an iterator over all the identities in the identity store
specified by 'client'.
"""
identities = client.get_identities()
while True:
for identity in identities.get('results', []):
yield identity
if identities.get('next') is not None:
qs = urlparse.urlparse(identities['next']).query
identities = client.get_identities(params=qs)
else:
break
def run(self, **kwargs):
l = self.get_logger(**kwargs)
l.info('Starting location import')
imported_count = 0
client = IdentityStoreApiClient(
settings.IDENTITY_STORE_TOKEN, settings.IDENTITY_STORE_URL)
for identity in self.get_identities(client):
parish = identity.get('details', {}).get('parish')
if parish is not None:
_, created = Parish.objects.get_or_create(name=parish.title())
if created:
imported_count += 1
l.info('Imported {} locations'.format(imported_count))
return imported_count
sync_locations = SyncLocations()
|
<commit_before><commit_msg>Add task to sync locations from identity store<commit_after>import urlparse
from celery.task import Task
from django.conf import settings
from seed_services_client import IdentityStoreApiClient
from .models import Parish
class SyncLocations(Task):
"""
Has a look at all the identity store identities, and ensures that all of
the locations assigned to identities appear in the list of locations.
"""
def get_identities(self, client):
"""
Returns an iterator over all the identities in the identity store
specified by 'client'.
"""
identities = client.get_identities()
while True:
for identity in identities.get('results', []):
yield identity
if identities.get('next') is not None:
qs = urlparse.urlparse(identities['next']).query
identities = client.get_identities(params=qs)
else:
break
def run(self, **kwargs):
l = self.get_logger(**kwargs)
l.info('Starting location import')
imported_count = 0
client = IdentityStoreApiClient(
settings.IDENTITY_STORE_TOKEN, settings.IDENTITY_STORE_URL)
for identity in self.get_identities(client):
parish = identity.get('details', {}).get('parish')
if parish is not None:
_, created = Parish.objects.get_or_create(name=parish.title())
if created:
imported_count += 1
l.info('Imported {} locations'.format(imported_count))
return imported_count
sync_locations = SyncLocations()
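
The pagination loop assumes the client accepts the query string of the 'next' URL via params. A minimal sketch of exercising it with a fake client (FakeClient and its page contents are illustrative only):

class FakeClient(object):
    """Stands in for IdentityStoreApiClient; serves canned pages."""
    def __init__(self, pages):
        self.pages = list(pages)

    def get_identities(self, params=None):
        return self.pages.pop(0)

pages = [
    {'results': [{'details': {'parish': 'kawempe'}}],
     'next': 'http://example.org/identities/?limit=1&offset=1'},
    {'results': [{'details': {}}], 'next': None},
]
identities = list(SyncLocations().get_identities(FakeClient(pages)))
assert len(identities) == 2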
|
|
2f99b380ec5d725d1d27df584357324ade9e0782
|
test_hash.py
|
test_hash.py
|
from hash_table import HashTable
import io
import pytest
words = []
with io.open('/usr/share/dict/words', 'r') as word_file:
words = word_file.readlines()
def test_init():
ht = HashTable()
assert len(ht.table) == 1024
ht2 = HashTable(10000)
assert len(ht2.table) == 10000
def test_hash():
ht = HashTable()
ht.set('coffee', 'coffee')
assert ht.get('coffee') == 'coffee'
def test_duplicate_hash_val():
ht = HashTable()
ht.set('bob', 'bob')
ht.set('obb', 'obb')
assert ht.get('bob') == 'bob'
assert ht.get('obb') == 'obb'
def test_word_file():
ht = HashTable()
for word in words:
ht.set(word, word)
assert ht.get(words[654]) == words[654]
assert ht.get(words[3541]) == words[3541]
assert ht.get(words[6541]) == words[6541]
def test_non_item():
ht = HashTable()
ht.set('coffee', 'coffee')
with pytest.raises(IndexError):
ht.get('milk')
def test_non_bucket():
ht = HashTable()
with pytest.raises(IndexError):
ht.table[1025]
|
Add tests for hash table
|
Add tests for hash table
|
Python
|
mit
|
jwarren116/data-structures-deux
|
Add tests for hash table
|
from hash_table import HashTable
import io
import pytest
words = []
with io.open('/usr/share/dict/words', 'r') as word_file:
words = word_file.readlines()
def test_init():
ht = HashTable()
assert len(ht.table) == 1024
ht2 = HashTable(10000)
assert len(ht2.table) == 10000
def test_hash():
ht = HashTable()
ht.set('coffee', 'coffee')
assert ht.get('coffee') == 'coffee'
def test_duplicate_hash_val():
ht = HashTable()
ht.set('bob', 'bob')
ht.set('obb', 'obb')
assert ht.get('bob') == 'bob'
assert ht.get('obb') == 'obb'
def test_word_file():
ht = HashTable()
for word in words:
ht.set(word, word)
assert ht.get(words[654]) == words[654]
assert ht.get(words[3541]) == words[3541]
assert ht.get(words[6541]) == words[6541]
def test_non_item():
ht = HashTable()
ht.set('coffee', 'coffee')
with pytest.raises(IndexError):
ht.get('milk')
def test_non_bucket():
ht = HashTable()
with pytest.raises(IndexError):
ht.table[1025]
|
<commit_before><commit_msg>Add tests for hash table<commit_after>
|
from hash_table import HashTable
import io
import pytest
words = []
with io.open('/usr/share/dict/words', 'r') as word_file:
words = word_file.readlines()
def test_init():
ht = HashTable()
assert len(ht.table) == 1024
ht2 = HashTable(10000)
assert len(ht2.table) == 10000
def test_hash():
ht = HashTable()
ht.set('coffee', 'coffee')
assert ht.get('coffee') == 'coffee'
def test_duplicate_hash_val():
ht = HashTable()
ht.set('bob', 'bob')
ht.set('obb', 'obb')
assert ht.get('bob') == 'bob'
assert ht.get('obb') == 'obb'
def test_word_file():
ht = HashTable()
for word in words:
ht.set(word, word)
assert ht.get(words[654]) == words[654]
assert ht.get(words[3541]) == words[3541]
assert ht.get(words[6541]) == words[6541]
def test_non_item():
ht = HashTable()
ht.set('coffee', 'coffee')
with pytest.raises(IndexError):
ht.get('milk')
def test_non_bucket():
ht = HashTable()
with pytest.raises(IndexError):
ht.table[1025]
|
Add tests for hash table

from hash_table import HashTable
import io
import pytest
words = []
with io.open('/usr/share/dict/words', 'r') as word_file:
words = word_file.readlines()
def test_init():
ht = HashTable()
assert len(ht.table) == 1024
ht2 = HashTable(10000)
assert len(ht2.table) == 10000
def test_hash():
ht = HashTable()
ht.set('coffee', 'coffee')
assert ht.get('coffee') == 'coffee'
def test_duplicate_hash_val():
ht = HashTable()
ht.set('bob', 'bob')
ht.set('obb', 'obb')
assert ht.get('bob') == 'bob'
assert ht.get('obb') == 'obb'
def test_word_file():
ht = HashTable()
for word in words:
ht.set(word, word)
assert ht.get(words[654]) == words[654]
assert ht.get(words[3541]) == words[3541]
assert ht.get(words[6541]) == words[6541]
def test_non_item():
ht = HashTable()
ht.set('coffee', 'coffee')
with pytest.raises(IndexError):
ht.get('milk')
def test_non_bucket():
ht = HashTable()
with pytest.raises(IndexError):
ht.table[1025]
|
<commit_before><commit_msg>Add tests for hash table<commit_after>from hash_table import HashTable
import io
import pytest
words = []
with io.open('/usr/share/dict/words', 'r') as word_file:
words = word_file.readlines()
def test_init():
ht = HashTable()
assert len(ht.table) == 1024
ht2 = HashTable(10000)
assert len(ht2.table) == 10000
def test_hash():
ht = HashTable()
ht.set('coffee', 'coffee')
assert ht.get('coffee') == 'coffee'
def test_duplicate_hash_val():
ht = HashTable()
ht.set('bob', 'bob')
ht.set('obb', 'obb')
assert ht.get('bob') == 'bob'
assert ht.get('obb') == 'obb'
def test_word_file():
ht = HashTable()
for word in words:
ht.set(word, word)
assert ht.get(words[654]) == words[654]
assert ht.get(words[3541]) == words[3541]
assert ht.get(words[6541]) == words[6541]
def test_non_item():
ht = HashTable()
ht.set('coffee', 'coffee')
with pytest.raises(IndexError):
ht.get('milk')
def test_non_bucket():
ht = HashTable()
with pytest.raises(IndexError):
ht.table[1025]
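
The hash_table module itself is not shown in this record; a minimal implementation consistent with these tests (illustrative only) could use an additive hash over a fixed bucket list, with IndexError signalling a missing key:

class HashTable(object):
    def __init__(self, size=1024):
        self.table = [[] for _ in range(size)]

    def _hash(self, key):
        # Additive hash: 'bob' and 'obb' land in the same bucket,
        # as the duplicate-hash test expects.
        return sum(ord(c) for c in key) % len(self.table)

    def set(self, key, val):
        self.table[self._hash(key)].append((key, val))

    def get(self, key):
        for k, v in self.table[self._hash(key)]:
            if k == key:
                return v
        raise IndexError('key not in hash table')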
|
|
d42cc110c3da50a62b62acb6585d4eb1d4254d66
|
examples/plot_sin_black_background.py
|
examples/plot_sin_black_background.py
|
# -*- coding: utf-8 -*-
"""
==================================================
Plotting simple sin function on a black background
==================================================
A simple example of the plot of a sin function on a black background
"""
# Code source: Loïc Estève
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
bg_color = 'black'
fg_color = 'white'
fig = plt.figure(facecolor=bg_color, edgecolor=fg_color)
axes = plt.axes((0.1, 0.1, 0.8, 0.8), axisbg=bg_color)
axes.xaxis.set_tick_params(color=fg_color, labelcolor=fg_color)
axes.yaxis.set_tick_params(color=fg_color, labelcolor=fg_color)
for spine in axes.spines.values():
spine.set_color(fg_color)
x = np.linspace(0, 2*np.pi, 100)
y = np.sin(x)
plt.plot(x, y, 'cyan', axes=axes)
plt.xlabel('$x$', color=fg_color)
plt.ylabel('$\sin(x)$', color=fg_color)
plt.show()
|
Add plot with black background
|
Add plot with black background
in order to show the impact of commit d5a5507cd9d4b2ba4af0bf4cf42bbda043853cb5
|
Python
|
bsd-3-clause
|
lesteve/sphinx-gallery,Eric89GXL/sphinx-gallery,Titan-C/sphinx-gallery,lesteve/sphinx-gallery,sphinx-gallery/sphinx-gallery,sphinx-gallery/sphinx-gallery,Titan-C/sphinx-gallery,Eric89GXL/sphinx-gallery
|
Add plot with black background
in order to show the impact of commit d5a5507cd9d4b2ba4af0bf4cf42bbda043853cb5
|
# -*- coding: utf-8 -*-
"""
==================================================
Plotting simple sin function on a black background
==================================================
A simple example of the plot of a sin function on a black background
"""
# Code source: Loïc Estève
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
bg_color = 'black'
fg_color = 'white'
fig = plt.figure(facecolor=bg_color, edgecolor=fg_color)
axes = plt.axes((0.1, 0.1, 0.8, 0.8), axisbg=bg_color)
axes.xaxis.set_tick_params(color=fg_color, labelcolor=fg_color)
axes.yaxis.set_tick_params(color=fg_color, labelcolor=fg_color)
for spine in axes.spines.values():
spine.set_color(fg_color)
x = np.linspace(0, 2*np.pi, 100)
y = np.sin(x)
plt.plot(x, y, 'cyan', axes=axes)
plt.xlabel('$x$', color=fg_color)
plt.ylabel('$\sin(x)$', color=fg_color)
plt.show()
|
<commit_before><commit_msg>Add plot with black background
in order to show the impact of commit d5a5507cd9d4b2ba4af0bf4cf42bbda043853cb5<commit_after>
|
# -*- coding: utf-8 -*-
"""
==================================================
Plotting simple sin function on a black background
==================================================
A simple example of the plot of a sin function on a black background
"""
# Code source: Loïc Estève
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
bg_color = 'black'
fg_color = 'white'
fig = plt.figure(facecolor=bg_color, edgecolor=fg_color)
axes = plt.axes((0.1, 0.1, 0.8, 0.8), axisbg=bg_color)
axes.xaxis.set_tick_params(color=fg_color, labelcolor=fg_color)
axes.yaxis.set_tick_params(color=fg_color, labelcolor=fg_color)
for spine in axes.spines.values():
spine.set_color(fg_color)
x = np.linspace(0, 2*np.pi, 100)
y = np.sin(x)
plt.plot(x, y, 'cyan', axes=axes)
plt.xlabel('$x$', color=fg_color)
plt.ylabel('$\sin(x)$', color=fg_color)
plt.show()
|
Add plot with black background
in order to show the impact of commit d5a5507cd9d4b2ba4af0bf4cf42bbda043853cb5

# -*- coding: utf-8 -*-
"""
==================================================
Plotting simple sin function on a black background
==================================================
A simple example of the plot of a sin function on a black background
"""
# Code source: Loïc Estève
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
bg_color = 'black'
fg_color = 'white'
fig = plt.figure(facecolor=bg_color, edgecolor=fg_color)
axes = plt.axes((0.1, 0.1, 0.8, 0.8), axisbg=bg_color)
axes.xaxis.set_tick_params(color=fg_color, labelcolor=fg_color)
axes.yaxis.set_tick_params(color=fg_color, labelcolor=fg_color)
for spine in axes.spines.values():
spine.set_color(fg_color)
x = np.linspace(0, 2*np.pi, 100)
y = np.sin(x)
plt.plot(x, y, 'cyan', axes=axes)
plt.xlabel('$x$', color=fg_color)
plt.ylabel('$\sin(x)$', color=fg_color)
plt.show()
|
<commit_before><commit_msg>Add plot with black background
in order to show the impact of commit d5a5507cd9d4b2ba4af0bf4cf42bbda043853cb5<commit_after># -*- coding: utf-8 -*-
"""
==================================================
Plotting simple sin function on a black background
==================================================
A simple example of the plot of a sin function on a black background
"""
# Code source: Loïc Estève
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
bg_color = 'black'
fg_color = 'white'
fig = plt.figure(facecolor=bg_color, edgecolor=fg_color)
axes = plt.axes((0.1, 0.1, 0.8, 0.8), axisbg=bg_color)
axes.xaxis.set_tick_params(color=fg_color, labelcolor=fg_color)
axes.yaxis.set_tick_params(color=fg_color, labelcolor=fg_color)
for spine in axes.spines.values():
spine.set_color(fg_color)
x = np.linspace(0, 2*np.pi, 100)
y = np.sin(x)
plt.plot(x, y, 'cyan', axes=axes)
plt.xlabel('$x$', color=fg_color)
plt.ylabel('$\sin(x)$', color=fg_color)
plt.show()
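
A version note: the axisbg keyword used here was deprecated in Matplotlib 2.0 and later removed; on newer releases the equivalent line is:

axes = plt.axes((0.1, 0.1, 0.8, 0.8), facecolor=bg_color)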
|
|
fde5e4378030a5332f853b0af73ed9a86c384108
|
connectors/fix_file_extensions.py
|
connectors/fix_file_extensions.py
|
import os
import subprocess
ROOT = 'e:\\'
def main():
total_renames = 0
for dir, dirs, files in os.walk(ROOT):
for f in files:
path = os.path.normpath(os.path.join(dir, f))
without_extension, fileExtension = os.path.splitext(path)
fileExtension = fileExtension.lower().lstrip('.')
if fileExtension == 'db':
continue
result = subprocess.check_output(['file', '-b', '--mime-type', path], stderr=subprocess.STDOUT)
actual_extension = None
if result is None:
print 'No type for: ' + path
elif 'image/jpeg' in result:
actual_extension = 'JPG'
elif 'image/gif' in result:
actual_extension = 'GIF'
elif 'image/png' in result:
actual_extension = 'PNG'
elif 'video/quicktime' in result:
actual_extension = 'MOV'
elif 'video/mp4' in result:
actual_extension = 'MP4'
else:
print 'Unknown type: ' + result + ' : ' + path
if actual_extension is None:
continue
if fileExtension != actual_extension.lower():
if fileExtension == 'jpeg' and actual_extension.lower() == 'jpg':
print 'skipped jpeg->jpg'
continue
try:
total_renames += 1
new_path = without_extension + '.' + actual_extension
print 'Need to fix extension of ' + path + ' to: ' + new_path
#uncomment to do actual renames
#os.rename(path, new_path)
except Exception as e:
                    print 'Exception renaming file: %s' % e
print 'done, total renames : %d' % total_renames
if __name__ == "__main__":
try:
main()
except Exception as e:
print 'Exception: ' + e.message
|
Add script to fix file extensions from messed up windows import
|
Add script to fix file extensions from messed up windows import
|
Python
|
mit
|
chuckhays/smugsync
|
Add script to fix file extensions from messed up windows import
|
import os
import subprocess
ROOT = 'e:\\'
def main():
total_renames = 0
for dir, dirs, files in os.walk(ROOT):
for f in files:
path = os.path.normpath(os.path.join(dir, f))
without_extension, fileExtension = os.path.splitext(path)
fileExtension = fileExtension.lower().lstrip('.')
if fileExtension == 'db':
continue
result = subprocess.check_output(['file', '-b', '--mime-type', path], stderr=subprocess.STDOUT)
actual_extension = None
if result is None:
print 'No type for: ' + path
elif 'image/jpeg' in result:
actual_extension = 'JPG'
elif 'image/gif' in result:
actual_extension = 'GIF'
elif 'image/png' in result:
actual_extension = 'PNG'
elif 'video/quicktime' in result:
actual_extension = 'MOV'
elif 'video/mp4' in result:
actual_extension = 'MP4'
else:
print 'Unknown type: ' + result + ' : ' + path
if actual_extension is None:
continue
if fileExtension != actual_extension.lower():
if fileExtension == 'jpeg' and actual_extension.lower() == 'jpg':
print 'skipped jpeg->jpg'
continue
try:
total_renames += 1
new_path = without_extension + '.' + actual_extension
print 'Need to fix extension of ' + path + ' to: ' + new_path
#uncomment to do actual renames
#os.rename(path, new_path)
except Exception as e:
                    print 'Exception renaming file: %s' % e
print 'done, total renames : %d' % total_renames
if __name__ == "__main__":
try:
main()
except Exception as e:
print 'Exception: ' + e.message
|
<commit_before><commit_msg>Add script to fix file extensions from messed up windows import<commit_after>
|
import os
import subprocess
ROOT = 'e:\\'
def main():
total_renames = 0
for dir, dirs, files in os.walk(ROOT):
for f in files:
path = os.path.normpath(os.path.join(dir, f))
without_extension, fileExtension = os.path.splitext(path)
fileExtension = fileExtension.lower().lstrip('.')
if fileExtension == 'db':
continue
result = subprocess.check_output(['file', '-b', '--mime-type', path], stderr=subprocess.STDOUT)
actual_extension = None
if result is None:
print 'No type for: ' + path
elif 'image/jpeg' in result:
actual_extension = 'JPG'
elif 'image/gif' in result:
actual_extension = 'GIF'
elif 'image/png' in result:
actual_extension = 'PNG'
elif 'video/quicktime' in result:
actual_extension = 'MOV'
elif 'video/mp4' in result:
actual_extension = 'MP4'
else:
print 'Unknown type: ' + result + ' : ' + path
if actual_extension is None:
continue
if fileExtension != actual_extension.lower():
if fileExtension == 'jpeg' and actual_extension.lower() == 'jpg':
print 'skipped jpeg->jpg'
continue
try:
total_renames += 1
new_path = without_extension + '.' + actual_extension
print 'Need to fix extension of ' + path + ' to: ' + new_path
#uncomment to do actual renames
#os.rename(path, new_path)
except Exception as e:
                    print 'Exception renaming file: %s' % e
print 'done, total renames : %d' % total_renames
if __name__ == "__main__":
try:
main()
except Exception as e:
print 'Exception: ' + e.message
|
Add script to fix file extensions from messed up windows import

import os
import subprocess
ROOT = 'e:\\'
def main():
total_renames = 0
for dir, dirs, files in os.walk(ROOT):
for f in files:
path = os.path.normpath(os.path.join(dir, f))
without_extension, fileExtension = os.path.splitext(path)
fileExtension = fileExtension.lower().lstrip('.')
if fileExtension == 'db':
continue
result = subprocess.check_output(['file', '-b', '--mime-type', path], stderr=subprocess.STDOUT)
actual_extension = None
if result is None:
print 'No type for: ' + path
elif 'image/jpeg' in result:
actual_extension = 'JPG'
elif 'image/gif' in result:
actual_extension = 'GIF'
elif 'image/png' in result:
actual_extension = 'PNG'
elif 'video/quicktime' in result:
actual_extension = 'MOV'
elif 'video/mp4' in result:
actual_extension = 'MP4'
else:
print 'Unknown type: ' + result + ' : ' + path
if actual_extension is None:
continue
if fileExtension != actual_extension.lower():
if fileExtension == 'jpeg' and actual_extension.lower() == 'jpg':
print 'skipped jpeg->jpg'
continue
try:
total_renames += 1
new_path = without_extension + '.' + actual_extension
print 'Need to fix extension of ' + path + ' to: ' + new_path
#uncomment to do actual renames
#os.rename(path, new_path)
except Exception as e:
                    print 'Exception renaming file: %s' % e
print 'done, total renames : %d' % total_renames
if __name__ == "__main__":
try:
main()
except Exception as e:
print 'Exception: ' + e.message
|
<commit_before><commit_msg>Add script to fix file extensions from messed up windows import<commit_after>import os
import subprocess
ROOT = 'e:\\'
def main():
total_renames = 0
for dir, dirs, files in os.walk(ROOT):
for f in files:
path = os.path.normpath(os.path.join(dir, f))
without_extension, fileExtension = os.path.splitext(path)
fileExtension = fileExtension.lower().lstrip('.')
if fileExtension == 'db':
continue
result = subprocess.check_output(['file', '-b', '--mime-type', path], stderr=subprocess.STDOUT)
actual_extension = None
if result is None:
print 'No type for: ' + path
elif 'image/jpeg' in result:
actual_extension = 'JPG'
elif 'image/gif' in result:
actual_extension = 'GIF'
elif 'image/png' in result:
actual_extension = 'PNG'
elif 'video/quicktime' in result:
actual_extension = 'MOV'
elif 'video/mp4' in result:
actual_extension = 'MP4'
else:
print 'Unknown type: ' + result + ' : ' + path
if actual_extension is None:
continue
if fileExtension != actual_extension.lower():
if fileExtension == 'jpeg' and actual_extension.lower() == 'jpg':
print 'skipped jpeg->jpg'
continue
try:
total_renames += 1
new_path = without_extension + '.' + actual_extension
print 'Need to fix extension of ' + path + ' to: ' + new_path
#uncomment to do actual renames
#os.rename(path, new_path)
except Exception as e:
                    print 'Exception renaming file: %s' % e
print 'done, total renames : %d' % total_renames
if __name__ == "__main__":
try:
main()
except Exception as e:
print 'Exception: ' + e.message
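
The script depends on the external file(1) binary being on PATH, which stock Windows lacks. For images, a content-based fallback could use the standard-library imghdr module (videos would still need file or python-magic):

import imghdr

def image_extension(path):
    # imghdr sniffs the file header and returns e.g. 'jpeg', 'png',
    # 'gif', or None for unrecognised content.
    kind = imghdr.what(path)
    return {'jpeg': 'JPG', 'png': 'PNG', 'gif': 'GIF'}.get(kind)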
|
|
937c337b35c36a91b7d153ca157b077e38641b0e
|
phyhlc/__init__.py
|
phyhlc/__init__.py
|
# encoding: UTF-8
"""
Physics and High-Level Controls library
.. moduleauthor:: Dylan Maxwell <maxwelld@frib.msu.edu>
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
# Configure the root logger with the default format. This function does
# nothing if the root logger already has handlers configured for it.
logging.basicConfig(format="%(levelname)s: %(asctime)s: %(name)s: %(message)s")
|
Add new top level package matching the repository name.
|
Add new top level package matching the repository name.
|
Python
|
bsd-3-clause
|
archman/phantasy,archman/phantasy
|
Add new top level package matching the repository name.
|
# encoding: UTF-8
"""
Physics and High-Level Controls library
.. moduleauthor:: Dylan Maxwell <maxwelld@frib.msu.edu>
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
# Configure the root logger with the default format. This function does
# nothing if the root logger already has handlers configured for it.
logging.basicConfig(format="%(levelname)s: %(asctime)s: %(name)s: %(message)s")
|
<commit_before><commit_msg>Add new top level package matching the repository name.<commit_after>
|
# encoding: UTF-8
"""
Physics and High-Level Controls library
.. moduleauthor:: Dylan Maxwell <maxwelld@frib.msu.edu>
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
# Configure the root logger with the default format. This function does
# nothing if the root logger already has handlers configured for it.
logging.basicConfig(format="%(levelname)s: %(asctime)s: %(name)s: %(message)s")
|
Add new top level package matching the repository name.

# encoding: UTF-8
"""
Physics and High-Level Controls library
.. moduleauthor:: Dylan Maxwell <maxwelld@frib.msu.edu>
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
# Configure the root logger with the default format. This function does
# nothing if the root logger already has handlers configured for it.
logging.basicConfig(format="%(levelname)s: %(asctime)s: %(name)s: %(message)s")
|
<commit_before><commit_msg>Add new top level package matching the repository name.<commit_after># encoding: UTF-8
"""
Physics and High-Level Controls library
.. moduleauthor:: Dylan Maxwell <maxwelld@frib.msu.edu>
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
# Configure the root logger with the default format. This function does
# nothing if the root logger already has handlers configured for it.
logging.basicConfig(format="%(levelname)s: %(asctime)s: %(name)s: %(message)s")
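
Modules inside the package would then pick up this root configuration through ordinary child loggers; a small usage sketch (the element name is a placeholder):

import logging

_LOGGER = logging.getLogger(__name__)
_LOGGER.warning("lattice element %s not found", "QV_01")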
|
|
ac36d66d7e25bbc0aa4dcef2ce364ca7aca841ea
|
lib/bridgedb/test/test_email_dkim.py
|
lib/bridgedb/test/test_email_dkim.py
|
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for the :mod:`bridgedb.email.dkim` module."""
import io
from twisted.mail.smtp import rfc822
from twisted.trial import unittest
from bridgedb.email import dkim
class CheckDKIMTests(unittest.TestCase):
"""Tests for :func:`email.server.checkDKIM`."""
def setUp(self):
"""Create fake email, distributor, and associated context data."""
self.goodMessage = io.StringIO(unicode("""\
From: user@gmail.com
To: bridges@localhost
X-DKIM-Authentication-Results: pass
Subject: testing
get bridges
"""))
self.badMessage = io.StringIO(unicode("""\
From: user@gmail.com
To: bridges@localhost
Subject: testing
get bridges
"""))
self.domainRules = {
'gmail.com': ["ignore_dots", "dkim"],
'example.com': [],
'localhost': [],
}
def test_checkDKIM_good(self):
message = rfc822.Message(self.goodMessage)
result = dkim.checkDKIM(message,
self.domainRules.get("gmail.com"))
self.assertTrue(result)
def test_checkDKIM_bad(self):
message = rfc822.Message(self.badMessage)
result = dkim.checkDKIM(message,
self.domainRules.get("gmail.com"))
self.assertIs(result, False)
|
Add unittests for bridgedb.email.dkim module.
|
Add unittests for bridgedb.email.dkim module.
|
Python
|
bsd-3-clause
|
pagea/bridgedb,pagea/bridgedb
|
Add unittests for bridgedb.email.dkim module.
|
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for the :mod:`bridgedb.email.dkim` module."""
import io
from twisted.mail.smtp import rfc822
from twisted.trial import unittest
from bridgedb.email import dkim
class CheckDKIMTests(unittest.TestCase):
"""Tests for :func:`email.server.checkDKIM`."""
def setUp(self):
"""Create fake email, distributor, and associated context data."""
self.goodMessage = io.StringIO(unicode("""\
From: user@gmail.com
To: bridges@localhost
X-DKIM-Authentication-Results: pass
Subject: testing
get bridges
"""))
self.badMessage = io.StringIO(unicode("""\
From: user@gmail.com
To: bridges@localhost
Subject: testing
get bridges
"""))
self.domainRules = {
'gmail.com': ["ignore_dots", "dkim"],
'example.com': [],
'localhost': [],
}
def test_checkDKIM_good(self):
message = rfc822.Message(self.goodMessage)
result = dkim.checkDKIM(message,
self.domainRules.get("gmail.com"))
self.assertTrue(result)
def test_checkDKIM_bad(self):
message = rfc822.Message(self.badMessage)
result = dkim.checkDKIM(message,
self.domainRules.get("gmail.com"))
self.assertIs(result, False)
|
<commit_before><commit_msg>Add unittests for bridgedb.email.dkim module.<commit_after>
|
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for the :mod:`bridgedb.email.dkim` module."""
import io
from twisted.mail.smtp import rfc822
from twisted.trial import unittest
from bridgedb.email import dkim
class CheckDKIMTests(unittest.TestCase):
"""Tests for :func:`email.server.checkDKIM`."""
def setUp(self):
"""Create fake email, distributor, and associated context data."""
self.goodMessage = io.StringIO(unicode("""\
From: user@gmail.com
To: bridges@localhost
X-DKIM-Authentication-Results: pass
Subject: testing
get bridges
"""))
self.badMessage = io.StringIO(unicode("""\
From: user@gmail.com
To: bridges@localhost
Subject: testing
get bridges
"""))
self.domainRules = {
'gmail.com': ["ignore_dots", "dkim"],
'example.com': [],
'localhost': [],
}
def test_checkDKIM_good(self):
message = rfc822.Message(self.goodMessage)
result = dkim.checkDKIM(message,
self.domainRules.get("gmail.com"))
self.assertTrue(result)
def test_checkDKIM_bad(self):
message = rfc822.Message(self.badMessage)
result = dkim.checkDKIM(message,
self.domainRules.get("gmail.com"))
self.assertIs(result, False)
|
Add unittests for bridgedb.email.dkim module.

# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for the :mod:`bridgedb.email.dkim` module."""
import io
from twisted.mail.smtp import rfc822
from twisted.trial import unittest
from bridgedb.email import dkim
class CheckDKIMTests(unittest.TestCase):
"""Tests for :func:`email.server.checkDKIM`."""
def setUp(self):
"""Create fake email, distributor, and associated context data."""
self.goodMessage = io.StringIO(unicode("""\
From: user@gmail.com
To: bridges@localhost
X-DKIM-Authentication-Results: pass
Subject: testing
get bridges
"""))
self.badMessage = io.StringIO(unicode("""\
From: user@gmail.com
To: bridges@localhost
Subject: testing
get bridges
"""))
self.domainRules = {
'gmail.com': ["ignore_dots", "dkim"],
'example.com': [],
'localhost': [],
}
def test_checkDKIM_good(self):
message = rfc822.Message(self.goodMessage)
result = dkim.checkDKIM(message,
self.domainRules.get("gmail.com"))
self.assertTrue(result)
def test_checkDKIM_bad(self):
message = rfc822.Message(self.badMessage)
result = dkim.checkDKIM(message,
self.domainRules.get("gmail.com"))
self.assertIs(result, False)
|
<commit_before><commit_msg>Add unittests for bridgedb.email.dkim module.<commit_after># -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2013, Isis Lovecruft
# (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for the :mod:`bridgedb.email.dkim` module."""
import io
from twisted.mail.smtp import rfc822
from twisted.trial import unittest
from bridgedb.email import dkim
class CheckDKIMTests(unittest.TestCase):
"""Tests for :func:`email.server.checkDKIM`."""
def setUp(self):
"""Create fake email, distributor, and associated context data."""
self.goodMessage = io.StringIO(unicode("""\
From: user@gmail.com
To: bridges@localhost
X-DKIM-Authentication-Results: pass
Subject: testing
get bridges
"""))
self.badMessage = io.StringIO(unicode("""\
From: user@gmail.com
To: bridges@localhost
Subject: testing
get bridges
"""))
self.domainRules = {
'gmail.com': ["ignore_dots", "dkim"],
'example.com': [],
'localhost': [],
}
def test_checkDKIM_good(self):
message = rfc822.Message(self.goodMessage)
result = dkim.checkDKIM(message,
self.domainRules.get("gmail.com"))
self.assertTrue(result)
def test_checkDKIM_bad(self):
message = rfc822.Message(self.badMessage)
result = dkim.checkDKIM(message,
self.domainRules.get("gmail.com"))
self.assertIs(result, False)
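
checkDKIM itself is not part of this record; consistent with these tests, a minimal version could consult the X-DKIM-Authentication-Results header whenever the domain's rules include "dkim" (illustrative only, not the actual BridgeDB implementation):

def checkDKIM(message, rules):
    if 'dkim' in (rules or []):
        header = message.getheader('X-DKIM-Authentication-Results')
        return header is not None and header.strip().startswith('pass')
    return True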
|
|
e04bdd9bc27c5c19e55cce98d2876403d9c7397d
|
ooni/tests/test_templates.py
|
ooni/tests/test_templates.py
|
from ooni.settings import config
from ooni.templates import httpt
from twisted.internet.error import DNSLookupError
from twisted.internet import reactor, defer
from twisted.trial import unittest
config.logging = False
class TestHTTPT(unittest.TestCase):
def setUp(self):
from twisted.web.resource import Resource
from twisted.web.server import Site
class DummyResource(Resource):
isLeaf = True
def render_GET(self, request):
return "%s" % request.method
r = DummyResource()
factory = Site(r)
self.port = reactor.listenTCP(8880, factory)
def tearDown(self):
self.port.stopListening()
@defer.inlineCallbacks
def test_do_request(self):
http_test = httpt.HTTPTest()
http_test.localOptions['socksproxy'] = None
http_test._setUp()
response = yield http_test.doRequest('http://localhost:8880/')
assert response.body == "GET"
assert len(http_test.report['requests']) == 1
assert 'request' in http_test.report['requests'][0]
assert 'response' in http_test.report['requests'][0]
@defer.inlineCallbacks
def test_do_failing_request(self):
http_test = httpt.HTTPTest()
http_test.localOptions['socksproxy'] = None
http_test._setUp()
yield self.assertFailure(http_test.doRequest('http://invaliddomain/'), DNSLookupError)
assert http_test.report['requests'][0]['failure'] == 'dns_lookup_error'
|
Add unittests for the HTTP test template.
|
Add unittests for the HTTP test template.
|
Python
|
bsd-2-clause
|
kdmurray91/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,lordappsec/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,juga0/ooni-probe,juga0/ooni-probe
|
Add unittests for the HTTP test template.
|
from ooni.settings import config
from ooni.templates import httpt
from twisted.internet.error import DNSLookupError
from twisted.internet import reactor, defer
from twisted.trial import unittest
config.logging = False
class TestHTTPT(unittest.TestCase):
def setUp(self):
from twisted.web.resource import Resource
from twisted.web.server import Site
class DummyResource(Resource):
isLeaf = True
def render_GET(self, request):
return "%s" % request.method
r = DummyResource()
factory = Site(r)
self.port = reactor.listenTCP(8880, factory)
def tearDown(self):
self.port.stopListening()
@defer.inlineCallbacks
def test_do_request(self):
http_test = httpt.HTTPTest()
http_test.localOptions['socksproxy'] = None
http_test._setUp()
response = yield http_test.doRequest('http://localhost:8880/')
assert response.body == "GET"
assert len(http_test.report['requests']) == 1
assert 'request' in http_test.report['requests'][0]
assert 'response' in http_test.report['requests'][0]
@defer.inlineCallbacks
def test_do_failing_request(self):
http_test = httpt.HTTPTest()
http_test.localOptions['socksproxy'] = None
http_test._setUp()
yield self.assertFailure(http_test.doRequest('http://invaliddomain/'), DNSLookupError)
assert http_test.report['requests'][0]['failure'] == 'dns_lookup_error'
|
<commit_before><commit_msg>Add unittests for the HTTP test template.<commit_after>
|
from ooni.settings import config
from ooni.templates import httpt
from twisted.internet.error import DNSLookupError
from twisted.internet import reactor, defer
from twisted.trial import unittest
config.logging = False
class TestHTTPT(unittest.TestCase):
def setUp(self):
from twisted.web.resource import Resource
from twisted.web.server import Site
class DummyResource(Resource):
isLeaf = True
def render_GET(self, request):
return "%s" % request.method
r = DummyResource()
factory = Site(r)
self.port = reactor.listenTCP(8880, factory)
def tearDown(self):
self.port.stopListening()
@defer.inlineCallbacks
def test_do_request(self):
http_test = httpt.HTTPTest()
http_test.localOptions['socksproxy'] = None
http_test._setUp()
response = yield http_test.doRequest('http://localhost:8880/')
assert response.body == "GET"
assert len(http_test.report['requests']) == 1
assert 'request' in http_test.report['requests'][0]
assert 'response' in http_test.report['requests'][0]
@defer.inlineCallbacks
def test_do_failing_request(self):
http_test = httpt.HTTPTest()
http_test.localOptions['socksproxy'] = None
http_test._setUp()
yield self.assertFailure(http_test.doRequest('http://invaliddomain/'), DNSLookupError)
assert http_test.report['requests'][0]['failure'] == 'dns_lookup_error'
|
Add unittests for the HTTP test template.

from ooni.settings import config
from ooni.templates import httpt
from twisted.internet.error import DNSLookupError
from twisted.internet import reactor, defer
from twisted.trial import unittest
config.logging = False
class TestHTTPT(unittest.TestCase):
def setUp(self):
from twisted.web.resource import Resource
from twisted.web.server import Site
class DummyResource(Resource):
isLeaf = True
def render_GET(self, request):
return "%s" % request.method
r = DummyResource()
factory = Site(r)
self.port = reactor.listenTCP(8880, factory)
def tearDown(self):
self.port.stopListening()
@defer.inlineCallbacks
def test_do_request(self):
http_test = httpt.HTTPTest()
http_test.localOptions['socksproxy'] = None
http_test._setUp()
response = yield http_test.doRequest('http://localhost:8880/')
assert response.body == "GET"
assert len(http_test.report['requests']) == 1
assert 'request' in http_test.report['requests'][0]
assert 'response' in http_test.report['requests'][0]
@defer.inlineCallbacks
def test_do_failing_request(self):
http_test = httpt.HTTPTest()
http_test.localOptions['socksproxy'] = None
http_test._setUp()
yield self.assertFailure(http_test.doRequest('http://invaliddomain/'), DNSLookupError)
assert http_test.report['requests'][0]['failure'] == 'dns_lookup_error'
|
<commit_before><commit_msg>Add unittests for the HTTP test template.<commit_after>from ooni.settings import config
from ooni.templates import httpt
from twisted.internet.error import DNSLookupError
from twisted.internet import reactor, defer
from twisted.trial import unittest
config.logging = False
class TestHTTPT(unittest.TestCase):
def setUp(self):
from twisted.web.resource import Resource
from twisted.web.server import Site
class DummyResource(Resource):
isLeaf = True
def render_GET(self, request):
return "%s" % request.method
r = DummyResource()
factory = Site(r)
self.port = reactor.listenTCP(8880, factory)
def tearDown(self):
self.port.stopListening()
@defer.inlineCallbacks
def test_do_request(self):
http_test = httpt.HTTPTest()
http_test.localOptions['socksproxy'] = None
http_test._setUp()
response = yield http_test.doRequest('http://localhost:8880/')
assert response.body == "GET"
assert len(http_test.report['requests']) == 1
assert 'request' in http_test.report['requests'][0]
assert 'response' in http_test.report['requests'][0]
@defer.inlineCallbacks
def test_do_failing_request(self):
http_test = httpt.HTTPTest()
http_test.localOptions['socksproxy'] = None
http_test._setUp()
yield self.assertFailure(http_test.doRequest('http://invaliddomain/'), DNSLookupError)
assert http_test.report['requests'][0]['failure'] == 'dns_lookup_error'
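
Binding the fixed port 8880 can collide with other tests on the same host; letting the OS pick a free port avoids that, for example:

# In setUp:
self.port = reactor.listenTCP(0, factory)
self.url = 'http://localhost:%d/' % self.port.getHost().port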
|
|
e615855f6ea90e63df3eb2ff42f79afa0329ae01
|
migrations/versions/dceb6cd3c41e_.py
|
migrations/versions/dceb6cd3c41e_.py
|
"""Add policycondition table, remove unused condition column from policy table
Revision ID: dceb6cd3c41e
Revises: b9131d0686eb
Create Date: 2019-07-02 12:19:19.646528
"""
# revision identifiers, used by Alembic.
revision = 'dceb6cd3c41e'
down_revision = 'b9131d0686eb'
from alembic import op, context
import sqlalchemy as sa
from sqlalchemy.schema import Sequence, CreateSequence
def dialect_supports_sequences():
migration_context = context.get_context()
return migration_context.dialect.supports_sequences
def create_seq(seq):
if dialect_supports_sequences():
op.execute(CreateSequence(seq))
def upgrade():
try:
try:
create_seq(Sequence('policycondition_seq'))
except Exception as _e:
pass
op.create_table('policycondition',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('policy_id', sa.Integer(), nullable=False),
sa.Column('section', sa.Unicode(length=255), nullable=False),
sa.Column('key', sa.Unicode(length=255), nullable=False),
sa.Column('value', sa.Unicode(length=2000), nullable=True),
sa.Column('comparator', sa.Unicode(length=255), nullable=True),
sa.ForeignKeyConstraint(['policy_id'], ['policy.id'], ),
sa.PrimaryKeyConstraint('id'),
mysql_row_format='DYNAMIC'
)
except Exception as exx:
print("Could not create table policycondition: {!r}".format(exx))
try:
op.drop_column('policy', 'condition')
except Exception as exx:
print("Could not drop column policy.condition: {!r}".format(exx))
def downgrade():
op.add_column('policy', sa.Column('condition', sa.INTEGER(), nullable=False))
op.drop_table('policycondition')
|
Add migration script for policycondition table
|
Add migration script for policycondition table
|
Python
|
agpl-3.0
|
privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea
|
Add migration script for policycondition table
|
"""Add policycondition table, remove unused condition column from policy table
Revision ID: dceb6cd3c41e
Revises: b9131d0686eb
Create Date: 2019-07-02 12:19:19.646528
"""
# revision identifiers, used by Alembic.
revision = 'dceb6cd3c41e'
down_revision = 'b9131d0686eb'
from alembic import op, context
import sqlalchemy as sa
from sqlalchemy.schema import Sequence, CreateSequence
def dialect_supports_sequences():
migration_context = context.get_context()
return migration_context.dialect.supports_sequences
def create_seq(seq):
if dialect_supports_sequences():
op.execute(CreateSequence(seq))
def upgrade():
try:
try:
create_seq(Sequence('policycondition_seq'))
except Exception as _e:
pass
op.create_table('policycondition',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('policy_id', sa.Integer(), nullable=False),
sa.Column('section', sa.Unicode(length=255), nullable=False),
sa.Column('key', sa.Unicode(length=255), nullable=False),
sa.Column('value', sa.Unicode(length=2000), nullable=True),
sa.Column('comparator', sa.Unicode(length=255), nullable=True),
sa.ForeignKeyConstraint(['policy_id'], ['policy.id'], ),
sa.PrimaryKeyConstraint('id'),
mysql_row_format='DYNAMIC'
)
except Exception as exx:
print("Could not create table policycondition: {!r}".format(exx))
try:
op.drop_column('policy', 'condition')
except Exception as exx:
print("Could not drop column policy.condition: {!r}".format(exx))
def downgrade():
op.add_column('policy', sa.Column('condition', sa.INTEGER(), nullable=False))
op.drop_table('policycondition')
|
<commit_before><commit_msg>Add migration script for policycondition table<commit_after>
|
"""Add policycondition table, remove unused condition column from policy table
Revision ID: dceb6cd3c41e
Revises: b9131d0686eb
Create Date: 2019-07-02 12:19:19.646528
"""
# revision identifiers, used by Alembic.
revision = 'dceb6cd3c41e'
down_revision = 'b9131d0686eb'
from alembic import op, context
import sqlalchemy as sa
from sqlalchemy.schema import Sequence, CreateSequence
def dialect_supports_sequences():
migration_context = context.get_context()
return migration_context.dialect.supports_sequences
def create_seq(seq):
if dialect_supports_sequences():
op.execute(CreateSequence(seq))
def upgrade():
try:
try:
create_seq(Sequence('policycondition_seq'))
except Exception as _e:
pass
op.create_table('policycondition',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('policy_id', sa.Integer(), nullable=False),
sa.Column('section', sa.Unicode(length=255), nullable=False),
sa.Column('key', sa.Unicode(length=255), nullable=False),
sa.Column('value', sa.Unicode(length=2000), nullable=True),
sa.Column('comparator', sa.Unicode(length=255), nullable=True),
sa.ForeignKeyConstraint(['policy_id'], ['policy.id'], ),
sa.PrimaryKeyConstraint('id'),
mysql_row_format='DYNAMIC'
)
except Exception as exx:
print("Could not create table policycondition: {!r}".format(exx))
try:
op.drop_column('policy', 'condition')
except Exception as exx:
print("Could not drop column policy.condition: {!r}".format(exx))
def downgrade():
op.add_column('policy', sa.Column('condition', sa.INTEGER(), nullable=False))
op.drop_table('policycondition')
|
Add migration script for policycondition table

"""Add policycondition table, remove unused condition column from policy table
Revision ID: dceb6cd3c41e
Revises: b9131d0686eb
Create Date: 2019-07-02 12:19:19.646528
"""
# revision identifiers, used by Alembic.
revision = 'dceb6cd3c41e'
down_revision = 'b9131d0686eb'
from alembic import op, context
import sqlalchemy as sa
from sqlalchemy.schema import Sequence, CreateSequence
def dialect_supports_sequences():
migration_context = context.get_context()
return migration_context.dialect.supports_sequences
def create_seq(seq):
if dialect_supports_sequences():
op.execute(CreateSequence(seq))
def upgrade():
try:
try:
create_seq(Sequence('policycondition_seq'))
except Exception as _e:
pass
op.create_table('policycondition',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('policy_id', sa.Integer(), nullable=False),
sa.Column('section', sa.Unicode(length=255), nullable=False),
sa.Column('key', sa.Unicode(length=255), nullable=False),
sa.Column('value', sa.Unicode(length=2000), nullable=True),
sa.Column('comparator', sa.Unicode(length=255), nullable=True),
sa.ForeignKeyConstraint(['policy_id'], ['policy.id'], ),
sa.PrimaryKeyConstraint('id'),
mysql_row_format='DYNAMIC'
)
except Exception as exx:
print("Could not create table policycondition: {!r}".format(exx))
try:
op.drop_column('policy', 'condition')
except Exception as exx:
print("Could not drop column policy.condition: {!r}".format(exx))
def downgrade():
op.add_column('policy', sa.Column('condition', sa.INTEGER(), nullable=False))
op.drop_table('policycondition')
|
<commit_before><commit_msg>Add migration script for policycondition table<commit_after>"""Add policycondition table, remove unused condition column from policy table
Revision ID: dceb6cd3c41e
Revises: b9131d0686eb
Create Date: 2019-07-02 12:19:19.646528
"""
# revision identifiers, used by Alembic.
revision = 'dceb6cd3c41e'
down_revision = 'b9131d0686eb'
from alembic import op, context
import sqlalchemy as sa
from sqlalchemy.schema import Sequence, CreateSequence
def dialect_supports_sequences():
migration_context = context.get_context()
return migration_context.dialect.supports_sequences
def create_seq(seq):
if dialect_supports_sequences():
op.execute(CreateSequence(seq))
def upgrade():
try:
try:
create_seq(Sequence('policycondition_seq'))
except Exception as _e:
pass
op.create_table('policycondition',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('policy_id', sa.Integer(), nullable=False),
sa.Column('section', sa.Unicode(length=255), nullable=False),
sa.Column('key', sa.Unicode(length=255), nullable=False),
sa.Column('value', sa.Unicode(length=2000), nullable=True),
sa.Column('comparator', sa.Unicode(length=255), nullable=True),
sa.ForeignKeyConstraint(['policy_id'], ['policy.id'], ),
sa.PrimaryKeyConstraint('id'),
mysql_row_format='DYNAMIC'
)
except Exception as exx:
print("Could not create table policycondition: {!r}".format(exx))
try:
op.drop_column('policy', 'condition')
except Exception as exx:
print("Could not drop column policy.condition: {!r}".format(exx))
def downgrade():
op.add_column('policy', sa.Column('condition', sa.INTEGER(), nullable=False))
op.drop_table('policycondition')
|
|
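A minimal sketch of exercising the schema this migration creates, using SQLAlchemy 1.4+ against an in-memory SQLite database; the table definitions mirror the op.create_table() call above, while the sample policy row and condition values are illustrative, not part of privacyIDEA:

from sqlalchemy import (Column, ForeignKey, Integer, MetaData, Table,
                        Unicode, create_engine, insert, select)

metadata = MetaData()

# Mirrors the columns created by upgrade() above.
policy = Table('policy', metadata,
               Column('id', Integer, primary_key=True),
               Column('name', Unicode(64)))
policycondition = Table('policycondition', metadata,
                        Column('id', Integer, primary_key=True),
                        Column('policy_id', Integer, ForeignKey('policy.id'),
                               nullable=False),
                        Column('section', Unicode(255), nullable=False),
                        Column('key', Unicode(255), nullable=False),
                        Column('value', Unicode(2000)),
                        Column('comparator', Unicode(255)))

engine = create_engine('sqlite://')   # throwaway in-memory database
metadata.create_all(engine)

with engine.begin() as conn:
    conn.execute(insert(policy).values(id=1, name='pol1'))
    conn.execute(insert(policycondition).values(
        policy_id=1, section='userinfo', key='type',
        value='admin', comparator='equals'))
    # Fetch all conditions attached to policy 1.
    rows = conn.execute(
        select(policycondition).where(
            policycondition.c.policy_id == 1)).fetchall()
    print(rows)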
805c8a597a57c21ad2c3fb32b313450484d1cde7
|
Runtime/test_Utils.py
|
Runtime/test_Utils.py
|
"""Test package for Utils.py."""
import unittest
import Utils
class TestUtils(unittest.TestCase):
"""Test cases for the Utils."""
def test_tree(self):
"""Test the tree method."""
t = Utils.tree()
t["hello"] = 3
t["foo"]["bar"] = 42
self.assertEqual(t["hello"], 3)
self.assertEqual(t["foo"]["bar"], 42)
if __name__ == "__main__":
unittest.main()
|
Add unit test for Utils.py
|
Add unit test for Utils.py
|
Python
|
mit
|
lnsp/tea,lnsp/tea
|
Add unit test for Utils.py
|
"""Test package for Utils.py."""
import unittest
import Utils
class TestUtils(unittest.TestCase):
"""Test cases for the Utils."""
def test_tree(self):
"""Test the tree method."""
t = Utils.tree()
t["hello"] = 3
t["foo"]["bar"] = 42
self.assertEqual(t["hello"], 3)
self.assertEqual(t["foo"]["bar"], 42)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit test for Utils.py<commit_after>
|
"""Test package for Utils.py."""
import unittest
import Utils
class TestUtils(unittest.TestCase):
"""Test cases for the Utils."""
def test_tree(self):
"""Test the tree method."""
t = Utils.tree()
t["hello"] = 3
t["foo"]["bar"] = 42
self.assertEqual(t["hello"], 3)
self.assertEqual(t["foo"]["bar"], 42)
if __name__ == "__main__":
unittest.main()
|
Add unit test for Utils.py"""Test package for Utils.py."""
import unittest
import Utils
class TestUtils(unittest.TestCase):
"""Test cases for the Utils."""
def test_tree(self):
"""Test the tree method."""
t = Utils.tree()
t["hello"] = 3
t["foo"]["bar"] = 42
self.assertEqual(t["hello"], 3)
self.assertEqual(t["foo"]["bar"], 42)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit test for Utils.py<commit_after>"""Test package for Utils.py."""
import unittest
import Utils
class TestUtils(unittest.TestCase):
"""Test cases for the Utils."""
def test_tree(self):
"""Test the tree method."""
t = Utils.tree()
t["hello"] = 3
t["foo"]["bar"] = 42
self.assertEqual(t["hello"], 3)
self.assertEqual(t["foo"]["bar"], 42)
if __name__ == "__main__":
unittest.main()
|
|
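Utils.py itself is not included in this record; the classic autovivifying-defaultdict recipe is one implementation that satisfies the test above (an assumption about what Utils.tree does, not the repository's actual code):

from collections import defaultdict

def tree():
    # Every missing key transparently creates another nested tree.
    return defaultdict(tree)

t = tree()
t["hello"] = 3
t["foo"]["bar"] = 42      # the intermediate dict is created on the fly
assert t["hello"] == 3 and t["foo"]["bar"] == 42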
a99a1bad2d7281a172369c8792c05a1c429a3296
|
examples/translations/dutch_test_1.py
|
examples/translations/dutch_test_1.py
|
# Dutch Language Test
from seleniumbase.translate.dutch import Testgeval
class MijnTestklasse(Testgeval):
def test_voorbeeld_1(self):
self.url_openen("https://nl.wikipedia.org/wiki/Hoofdpagina")
self.controleren_element('a[title*="hoofdpagina gaan"]')
self.controleren_tekst("Welkom op Wikipedia", "td.hp-welkom")
self.tekst_bijwerken("#searchInput", "Stroopwafel")
self.klik("#searchButton")
self.controleren_tekst("Stroopwafel", "#firstHeading")
self.controleren_element('img[alt="Stroopwafels"]')
self.tekst_bijwerken("#searchInput", "Rijksmuseum Amsterdam")
self.klik("#searchButton")
self.controleren_tekst("Rijksmuseum", "#firstHeading")
self.controleren_element('img[alt="Het Rijksmuseum"]')
self.terug()
self.controleren_ware("Stroopwafel" in self.huidige_url_ophalen())
self.vooruit()
self.controleren_ware("Rijksmuseum" in self.huidige_url_ophalen())
|
Add a SeleniumBase test in Dutch
|
Add a SeleniumBase test in Dutch
|
Python
|
mit
|
seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase
|
Add a SeleniumBase test in Dutch
|
# Dutch Language Test
from seleniumbase.translate.dutch import Testgeval
class MijnTestklasse(Testgeval):
def test_voorbeeld_1(self):
self.url_openen("https://nl.wikipedia.org/wiki/Hoofdpagina")
self.controleren_element('a[title*="hoofdpagina gaan"]')
self.controleren_tekst("Welkom op Wikipedia", "td.hp-welkom")
self.tekst_bijwerken("#searchInput", "Stroopwafel")
self.klik("#searchButton")
self.controleren_tekst("Stroopwafel", "#firstHeading")
self.controleren_element('img[alt="Stroopwafels"]')
self.tekst_bijwerken("#searchInput", "Rijksmuseum Amsterdam")
self.klik("#searchButton")
self.controleren_tekst("Rijksmuseum", "#firstHeading")
self.controleren_element('img[alt="Het Rijksmuseum"]')
self.terug()
self.controleren_ware("Stroopwafel" in self.huidige_url_ophalen())
self.vooruit()
self.controleren_ware("Rijksmuseum" in self.huidige_url_ophalen())
|
<commit_before><commit_msg>Add a SeleniumBase test in Dutch<commit_after>
|
# Dutch Language Test
from seleniumbase.translate.dutch import Testgeval
class MijnTestklasse(Testgeval):
def test_voorbeeld_1(self):
self.url_openen("https://nl.wikipedia.org/wiki/Hoofdpagina")
self.controleren_element('a[title*="hoofdpagina gaan"]')
self.controleren_tekst("Welkom op Wikipedia", "td.hp-welkom")
self.tekst_bijwerken("#searchInput", "Stroopwafel")
self.klik("#searchButton")
self.controleren_tekst("Stroopwafel", "#firstHeading")
self.controleren_element('img[alt="Stroopwafels"]')
self.tekst_bijwerken("#searchInput", "Rijksmuseum Amsterdam")
self.klik("#searchButton")
self.controleren_tekst("Rijksmuseum", "#firstHeading")
self.controleren_element('img[alt="Het Rijksmuseum"]')
self.terug()
self.controleren_ware("Stroopwafel" in self.huidige_url_ophalen())
self.vooruit()
self.controleren_ware("Rijksmuseum" in self.huidige_url_ophalen())
|
Add a SeleniumBase test in Dutch# Dutch Language Test
from seleniumbase.translate.dutch import Testgeval
class MijnTestklasse(Testgeval):
def test_voorbeeld_1(self):
self.url_openen("https://nl.wikipedia.org/wiki/Hoofdpagina")
self.controleren_element('a[title*="hoofdpagina gaan"]')
self.controleren_tekst("Welkom op Wikipedia", "td.hp-welkom")
self.tekst_bijwerken("#searchInput", "Stroopwafel")
self.klik("#searchButton")
self.controleren_tekst("Stroopwafel", "#firstHeading")
self.controleren_element('img[alt="Stroopwafels"]')
self.tekst_bijwerken("#searchInput", "Rijksmuseum Amsterdam")
self.klik("#searchButton")
self.controleren_tekst("Rijksmuseum", "#firstHeading")
self.controleren_element('img[alt="Het Rijksmuseum"]')
self.terug()
self.controleren_ware("Stroopwafel" in self.huidige_url_ophalen())
self.vooruit()
self.controleren_ware("Rijksmuseum" in self.huidige_url_ophalen())
|
<commit_before><commit_msg>Add a SeleniumBase test in Dutch<commit_after># Dutch Language Test
from seleniumbase.translate.dutch import Testgeval
class MijnTestklasse(Testgeval):
def test_voorbeeld_1(self):
self.url_openen("https://nl.wikipedia.org/wiki/Hoofdpagina")
self.controleren_element('a[title*="hoofdpagina gaan"]')
self.controleren_tekst("Welkom op Wikipedia", "td.hp-welkom")
self.tekst_bijwerken("#searchInput", "Stroopwafel")
self.klik("#searchButton")
self.controleren_tekst("Stroopwafel", "#firstHeading")
self.controleren_element('img[alt="Stroopwafels"]')
self.tekst_bijwerken("#searchInput", "Rijksmuseum Amsterdam")
self.klik("#searchButton")
self.controleren_tekst("Rijksmuseum", "#firstHeading")
self.controleren_element('img[alt="Het Rijksmuseum"]')
self.terug()
self.controleren_ware("Stroopwafel" in self.huidige_url_ophalen())
self.vooruit()
self.controleren_ware("Rijksmuseum" in self.huidige_url_ophalen())
|
|
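For readers comparing the translation with SeleniumBase's English API, the Dutch methods correspond roughly to the BaseCase calls below (run with pytest); the mapping is illustrative, so consult seleniumbase.translate.dutch for the authoritative one:

from seleniumbase import BaseCase

class MyTestClass(BaseCase):
    def test_example_1(self):
        self.open("https://nl.wikipedia.org/wiki/Hoofdpagina")     # url_openen
        self.assert_element('a[title*="hoofdpagina gaan"]')        # controleren_element
        self.assert_text("Welkom op Wikipedia", "td.hp-welkom")    # controleren_tekst
        self.update_text("#searchInput", "Stroopwafel")            # tekst_bijwerken
        self.click("#searchButton")                                # klik
        self.update_text("#searchInput", "Rijksmuseum Amsterdam")
        self.click("#searchButton")
        self.go_back()                                             # terug
        self.assert_true("Stroopwafel" in self.get_current_url())  # controleren_ware
        self.go_forward()                                          # vooruit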
8b2ff178c7a4fdd9af536e3c92a7e5edb05b21e0
|
docs/other/fix_notebook_html_colour.py
|
docs/other/fix_notebook_html_colour.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A script to re-enable colour in .html files produced from IPython notebooks.
Based on a script in a GitHub gist with this copyright notice:
#----------------------------------------------------------------------------
# Copyright (c) 2013 - Damián Avila
#
# Distributed under the terms of the Modified BSD License.
#
# A little snippet to fix @media print issue printing slides from IPython
#-----------------------------------------------------------------------------
"""
import io
import sys
notebook = sys.argv[1]
assert notebook.endswith('.html')
# notebook = 'jevans.ipynb'
path = notebook[:-5] + '.html'
flag = u'@media print{*{text-shadow:none !important;color:#000 !important'
with io.open(path, 'r') as in_file:
data = in_file.readlines()
for i, line in enumerate(data):
if line[:64] == flag:
data[i] = data[i].replace('color:#000 !important;', '')
with io.open(path, 'w') as out_file:
out_file.writelines(data)
print("You can now print your slides")
|
Add a script to enable colour in HTML produced from IPython notebooks
|
Add a script to enable colour in HTML produced from IPython notebooks
|
Python
|
mit
|
krischer/python-future,PythonCharmers/python-future,michaelpacer/python-future,krischer/python-future,michaelpacer/python-future,QuLogic/python-future,PythonCharmers/python-future,QuLogic/python-future
|
Add a script to enable colour in HTML produced from IPython notebooks
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A script to re-enable colour in .html files produced from IPython notebooks.
Based on a script in a GitHub gist with this copyright notice:
#----------------------------------------------------------------------------
# Copyright (c) 2013 - Damián Avila
#
# Distributed under the terms of the Modified BSD License.
#
# A little snippet to fix @media print issue printing slides from IPython
#-----------------------------------------------------------------------------
"""
import io
import sys
notebook = sys.argv[1]
assert notebook.endswith('.html')
# notebook = 'jevans.ipynb'
path = notebook[:-5] + '.html'
flag = u'@media print{*{text-shadow:none !important;color:#000 !important'
with io.open(path, 'r') as in_file:
data = in_file.readlines()
for i, line in enumerate(data):
if line[:64] == flag:
data[i] = data[i].replace('color:#000 !important;', '')
with io.open(path, 'w') as out_file:
out_file.writelines(data)
print("You can now print your slides")
|
<commit_before><commit_msg>Add a script to enable colour in HTML produced from IPython notebooks<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A script to re-enable colour in .html files produced from IPython notebooks.
Based on a script in a GitHub gist with this copyright notice:
#----------------------------------------------------------------------------
# Copyright (c) 2013 - Damián Avila
#
# Distributed under the terms of the Modified BSD License.
#
# A little snippet to fix @media print issue printing slides from IPython
#-----------------------------------------------------------------------------
"""
import io
import sys
notebook = sys.argv[1]
assert notebook.endswith('.html')
# notebook = 'jevans.ipynb'
path = notebook[:-5] + '.html'
flag = u'@media print{*{text-shadow:none !important;color:#000 !important'
with io.open(path, 'r') as in_file:
data = in_file.readlines()
for i, line in enumerate(data):
if line[:64] == flag:
data[i] = data[i].replace('color:#000 !important;', '')
with io.open(path, 'w') as out_file:
out_file.writelines(data)
print("You can now print your slides")
|
Add a script to enable colour in HTML produced from IPython notebooks#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A script to re-enable colour in .html files produced from IPython notebooks.
Based on a script in a GitHub gist with this copyright notice:
#----------------------------------------------------------------------------
# Copyright (c) 2013 - Damián Avila
#
# Distributed under the terms of the Modified BSD License.
#
# A little snippet to fix @media print issue printing slides from IPython
#-----------------------------------------------------------------------------
"""
import io
import sys
notebook = sys.argv[1]
assert notebook.endswith('.html')
# notebook = 'jevans.ipynb'
path = notebook[:-5] + '.html'
flag = u'@media print{*{text-shadow:none !important;color:#000 !important'
with io.open(path, 'r') as in_file:
data = in_file.readlines()
for i, line in enumerate(data):
if line[:64] == flag:
data[i] = data[i].replace('color:#000 !important;', '')
with io.open(path, 'w') as out_file:
out_file.writelines(data)
print("You can now print your slides")
|
<commit_before><commit_msg>Add a script to enable colour in HTML produced from IPython notebooks<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A script to re-enable colour in .html files produced from IPython notebooks.
Based on a script in a GitHub gist with this copyright notice:
#----------------------------------------------------------------------------
# Copyright (c) 2013 - Damián Avila
#
# Distributed under the terms of the Modified BSD License.
#
# A little snippet to fix @media print issue printing slides from IPython
#-----------------------------------------------------------------------------
"""
import io
import sys
notebook = sys.argv[1]
assert notebook.endswith('.html')
# notebook = 'jevans.ipynb'
path = notebook[:-5] + '.html'
flag = u'@media print{*{text-shadow:none !important;color:#000 !important'
with io.open(path, 'r') as in_file:
data = in_file.readlines()
for i, line in enumerate(data):
if line[:64] == flag:
data[i] = data[i].replace('color:#000 !important;', '')
with io.open(path, 'w') as out_file:
out_file.writelines(data)
print("You can now print your slides")
|
|
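As a usage note, the script takes the exported notebook as its only argument (python fix_notebook_html_colour.py slides.html), and the line[:64] prefix test works because the flag string is exactly 64 characters long. A slightly more robust variant, sketched here rather than taken from the repo, keys on startswith instead:

import io
import sys

def fix_print_colour(path):
    # Strip the forced black text colour from the @media print rule.
    flag = u'@media print{*{text-shadow:none !important;color:#000 !important'
    with io.open(path, 'r') as in_file:
        data = in_file.readlines()
    data = [line.replace('color:#000 !important;', '')
            if line.startswith(flag) else line
            for line in data]
    with io.open(path, 'w') as out_file:
        out_file.writelines(data)

if __name__ == '__main__':
    fix_print_colour(sys.argv[1])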
0566599ded67ea6337a3ee4a99b592de571d2fa6
|
WebSphere/cfgVersionStamp.py
|
WebSphere/cfgVersionStamp.py
|
# cfgVersionStamp.py
# Author: Christoph Stoettner
# E-Mail: christoph.stoettner@stoeps.de
#
# History:
# 20140415 Christoph Stoettner Initial Version
print "\nSet Version Stamp in LotusConnections-config.xml to the current date and time\n"
path = raw_input( "Path and Folder where config is temporarily stored: " )
execfile("connectionsConfig.py")
LCConfigService.checkOutConfig(path,AdminControl.getCell())
LCConfigService.updateConfig("versionStamp","")
LCConfigService.checkInConfig(path,AdminControl.getCell())
synchAllNodes()
|
Add new script for version stamp
|
Add new script for version stamp
|
Python
|
apache-2.0
|
stoeps13/ibmcnxscripting,stoeps13/ibmcnxscripting,stoeps13/ibmcnxscripting
|
Add new script for version stamp
|
# cfgVersionStamp.py
# Author: Christoph Stoettner
# E-Mail: christoph.stoettner@stoeps.de
#
# History:
# 20140415 Christoph Stoettner Initial Version
print "\nSet Version Stamp in LotusConnections-config.xml to the current date and time\n"
path = raw_input( "Path and Folder where config is temporarily stored: " )
execfile("connectionsConfig.py")
LCConfigService.checkOutConfig(path,AdminControl.getCell())
LCConfigService.updateConfig("versionStamp","")
LCConfigService.checkInConfig(path,AdminControl.getCell())
synchAllNodes()
|
<commit_before><commit_msg>Add new script for version stamp<commit_after>
|
# cfgVersionStamp.py
# Author: Christoph Stoettner
# E-Mail: christoph.stoettner@stoeps.de
#
# History:
# 20140415 Christoph Stoettner Initial Version
print "\nSet Version Stamp in LotusConnections-config.xml to the current date and time\n"
path = raw_input( "Path and Folder where config is temporarily stored: " )
execfile("connectionsConfig.py")
LCConfigService.checkOutConfig(path,AdminControl.getCell())
LCConfigService.updateConfig("versionStamp","")
LCConfigService.checkInConfig(path,AdminControl.getCell())
synchAllNodes()
|
Add new script for version stamp# cfgVersionStamp.py
# Author: Christoph Stoettner
# E-Mail: christoph.stoettner@stoeps.de
#
# History:
# 20140415 Christoph Stoettner Initial Version
print "\nSet Version Stamp in LotusConnections-config.xml to the current date and time\n"
path = raw_input( "Path and Folder where config is temporarily stored: " )
execfile("connectionsConfig.py")
LCConfigService.checkOutConfig(path,AdminControl.getCell())
LCConfigService.updateConfig("versionStamp","")
LCConfigService.checkInConfig(path,AdminControl.getCell())
synchAllNodes()
|
<commit_before><commit_msg>Add new script for version stamp<commit_after># cfgVersionStamp.py
# Author: Christoph Stoettner
# E-Mail: christoph.stoettner@stoeps.de
#
# History:
# 20140415 Christoph Stoettner Initial Version
print "\nSet Version Stamp in LotusConnections-config.xml to the current date and time\n"
path = raw_input( "Path and Folder where config is temporarily stored: " )
execfile("connectionsConfig.py")
LCConfigService.checkOutConfig(path,AdminControl.getCell())
LCConfigService.updateConfig("versionStamp","")
LCConfigService.checkInConfig(path,AdminControl.getCell())
synchAllNodes()
|
|
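The script above is Jython meant for WebSphere's wsadmin shell (wsadmin.sh -lang jython -f cfgVersionStamp.py), where AdminControl and the LCConfigService helpers loaded from connectionsConfig.py are available; it cannot run standalone. Only the stamp itself is easy to illustrate outside that environment. A sketch, with the exact format string being an assumption rather than what IBM Connections writes:

from datetime import datetime

def make_version_stamp():
    # A sortable date-and-time stamp, e.g. '20140415.103000' -- format is assumed.
    return datetime.now().strftime('%Y%m%d.%H%M%S')

print(make_version_stamp())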
7bf5471e0cffadf0cc50b67b590398bb2a413725
|
benchmarks/parse.py
|
benchmarks/parse.py
|
#!/usr/bin/env python
import json, sys, time
count = int(sys.argv[1])
for n in sys.argv[2:]:
print '%s:' % n
start = time.time()
fp = open(n)
for i in xrange(count):
fp.seek(0)
val = json.load(fp)
end = time.time()
print ' ', end - start
|
Add a Python json benchmark.
|
Add a Python json benchmark.
Alas, it's about 3x faster than us, due to being written in C.
|
Python
|
bsd-3-clause
|
timmytofu/aeson,lpsmith/aeson
|
Add a Python json benchmark.
Alas, it's about 3x faster than us, due to being written in C.
|
#!/usr/bin/env python
import json, sys, time
count = int(sys.argv[1])
for n in sys.argv[2:]:
print '%s:' % n
start = time.time()
fp = open(n)
for i in xrange(count):
fp.seek(0)
val = json.load(fp)
end = time.time()
print ' ', end - start
|
<commit_before><commit_msg>Add a Python json benchmark.
Alas, it's about 3x faster than us, due to being written in C.<commit_after>
|
#!/usr/bin/env python
import json, sys, time
count = int(sys.argv[1])
for n in sys.argv[2:]:
print '%s:' % n
start = time.time()
fp = open(n)
for i in xrange(count):
fp.seek(0)
val = json.load(fp)
end = time.time()
print ' ', end - start
|
Add a Python json benchmark.
Alas, it's about 3x faster than us, due to being written in C.#!/usr/bin/env python
import json, sys, time
count = int(sys.argv[1])
for n in sys.argv[2:]:
print '%s:' % n
start = time.time()
fp = open(n)
for i in xrange(count):
fp.seek(0)
val = json.load(fp)
end = time.time()
print ' ', end - start
|
<commit_before><commit_msg>Add a Python json benchmark.
Alas, it's about 3x faster than us, due to being written in C.<commit_after>#!/usr/bin/env python
import json, sys, time
count = int(sys.argv[1])
for n in sys.argv[2:]:
print '%s:' % n
start = time.time()
fp = open(n)
for i in xrange(count):
fp.seek(0)
val = json.load(fp)
end = time.time()
print ' ', end - start
|
|
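The benchmark above is Python 2 (print statements, xrange). A straightforward Python 3 port, as a sketch, looks like this:

#!/usr/bin/env python3
import json
import sys
import time

count = int(sys.argv[1])
for name in sys.argv[2:]:
    print('%s:' % name)
    start = time.time()
    with open(name) as fp:
        for _ in range(count):      # xrange is gone in Python 3
            fp.seek(0)
            json.load(fp)
    print('  ', time.time() - start)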
67a76f74ebd535a283da8eebb06c6811e9249ace
|
mda/db.py
|
mda/db.py
|
from sqlalchemy import create_engine
from sqlalchemy_utils.functions import create_database
from docker import Client
from docker.utils import create_host_config
class Database():
def __init__(self):
db_port = 5432
proxy_port = 5432
self.db_name = 'db'
self.proxy_name = 'db_proxy'
proxy_config = create_host_config(
links={'db':'db'}, port_bindings={db_port:('127.0.0.1',proxy_port)} )
self.db_kwargs = {'image': 'postgres:9.4', 'detach': True,
'name': self.db_name }
self.proxy_kwargs = { 'image': 'svendowideit/ambassador',
'host_config': proxy_config, 'detach': True,
'name': self.proxy_name, 'ports': [proxy_port] }
self.cli = Client(base_url='unix://var/run/docker.sock')
self.db_container = []
self.proxy_container = []
def __create_container(self, c_name, c_field, c_args ):
try:
c_res = self.cli.inspect_container( getattr(self,c_name) )
setattr(self, c_field, {'Id': c_res['Id'], 'Warnings': None })
except:
setattr(self, c_field,
self.cli.create_container(**getattr(self,c_args)) )
def create_app(self):
'''Create the database and proxy'''
self.__create_container( 'db_name', 'db_container', 'db_kwargs')
self.__create_container( 'proxy_name', 'proxy_container', 'proxy_kwargs')
def start_app(self):
self.cli.start( self.db_container )
self.cli.start( self.proxy_container )
def stop_app(self):
self.cli.stop( self.db_container )
self.cli.stop( self.proxy_container )
def remove_app(self):
self.cli.remove_container( self.proxy_name )
self.cli.remove_container( self.db_name )
def get_engine(self):
'''Return engine connection to postgres db'''
engine = create_engine('postgresql://postgres@localhost:5432/postgres')
return engine
|
Create docker Postgresql DB and proxy for testing with SQLAlchemy
|
Create docker Postgresql DB and proxy for testing with SQLAlchemy
|
Python
|
apache-2.0
|
mattmcd/PyAnalysis
|
Create docker Postgresql DB and proxy for testing with SQLAlchemy
|
from sqlalchemy import create_engine
from sqlalchemy_utils.functions import create_database
from docker import Client
from docker.utils import create_host_config
class Database():
def __init__(self):
db_port = 5432
proxy_port = 5432
self.db_name = 'db'
self.proxy_name = 'db_proxy'
proxy_config = create_host_config(
links={'db':'db'}, port_bindings={db_port:('127.0.0.1',proxy_port)} )
self.db_kwargs = {'image': 'postgres:9.4', 'detach': True,
'name': self.db_name }
self.proxy_kwargs = { 'image': 'svendowideit/ambassador',
'host_config': proxy_config, 'detach': True,
'name': self.proxy_name, 'ports': [proxy_port] }
self.cli = Client(base_url='unix://var/run/docker.sock')
self.db_container = []
self.proxy_container = []
def __create_container(self, c_name, c_field, c_args ):
try:
c_res = self.cli.inspect_container( getattr(self,c_name) )
setattr(self, c_field, {'Id': c_res['Id'], 'Warnings': None })
except:
setattr(self, c_field,
self.cli.create_container(**getattr(self,c_args)) )
def create_app(self):
'''Create the database and proxy'''
self.__create_container( 'db_name', 'db_container', 'db_kwargs')
self.__create_container( 'proxy_name', 'proxy_container', 'proxy_kwargs')
def start_app(self):
self.cli.start( self.db_container )
self.cli.start( self.proxy_container )
def stop_app(self):
self.cli.stop( self.db_container )
self.cli.stop( self.proxy_container )
def remove_app(self):
self.cli.remove_container( self.proxy_name )
self.cli.remove_container( self.db_name )
def get_engine(self):
'''Return engine connection to postgres db'''
engine = create_engine('postgresql://postgres@localhost:5432/postgres')
return engine
|
<commit_before><commit_msg>Create docker Postgresql DB and proxy for testing with SQLAlchemy<commit_after>
|
from sqlalchemy import create_engine
from sqlalchemy_utils.functions import create_database
from docker import Client
from docker.utils import create_host_config
class Database():
def __init__(self):
db_port = 5432
proxy_port = 5432
self.db_name = 'db'
self.proxy_name = 'db_proxy'
proxy_config = create_host_config(
links={'db':'db'}, port_bindings={db_port:('127.0.0.1',proxy_port)} )
self.db_kwargs = {'image': 'postgres:9.4', 'detach': True,
'name': self.db_name }
self.proxy_kwargs = { 'image': 'svendowideit/ambassador',
'host_config': proxy_config, 'detach': True,
'name': self.proxy_name, 'ports': [proxy_port] }
self.cli = Client(base_url='unix://var/run/docker.sock')
self.db_container = []
self.proxy_container = []
def __create_container(self, c_name, c_field, c_args ):
try:
c_res = self.cli.inspect_container( getattr(self,c_name) )
setattr(self, c_field, {'Id': c_res['Id'], 'Warnings': None })
except:
setattr(self, c_field,
self.cli.create_container(**getattr(self,c_args)) )
def create_app(self):
'''Create the database and proxy'''
self.__create_container( 'db_name', 'db_container', 'db_kwargs')
self.__create_container( 'proxy_name', 'proxy_container', 'proxy_kwargs')
def start_app(self):
self.cli.start( self.db_container )
self.cli.start( self.proxy_container )
def stop_app(self):
self.cli.stop( self.db_container )
self.cli.stop( self.proxy_container )
def remove_app(self):
self.cli.remove_container( self.proxy_name )
self.cli.remove_container( self.db_name )
def get_engine(self):
'''Return engine connection to postgres db'''
engine = create_engine('postgresql://postgres@localhost:5432/postgres')
return engine
|
Create docker Postgresql DB and proxy for testing with SQLAlchemyfrom sqlalchemy import create_engine
from sqlalchemy_utils.functions import create_database
from docker import Client
from docker.utils import create_host_config
class Database():
def __init__(self):
db_port = 5432
proxy_port = 5432
self.db_name = 'db'
self.proxy_name = 'db_proxy'
proxy_config = create_host_config(
links={'db':'db'}, port_bindings={db_port:('127.0.0.1',proxy_port)} )
self.db_kwargs = {'image': 'postgres:9.4', 'detach': True,
'name': self.db_name }
self.proxy_kwargs = { 'image': 'svendowideit/ambassador',
'host_config': proxy_config, 'detach': True,
'name': self.proxy_name, 'ports': [proxy_port] }
self.cli = Client(base_url='unix://var/run/docker.sock')
self.db_container = []
self.proxy_container = []
def __create_container(self, c_name, c_field, c_args ):
try:
c_res = self.cli.inspect_container( getattr(self,c_name) )
setattr(self, c_field, {'Id': c_res['Id'], 'Warnings': None })
except:
setattr(self, c_field,
self.cli.create_container(**getattr(self,c_args)) )
def create_app(self):
'''Create the database and proxy'''
self.__create_container( 'db_name', 'db_container', 'db_kwargs')
self.__create_container( 'proxy_name', 'proxy_container', 'proxy_kwargs')
def start_app(self):
self.cli.start( self.db_container )
self.cli.start( self.proxy_container )
def stop_app(self):
self.cli.stop( self.db_container )
self.cli.stop( self.proxy_container )
def remove_app(self):
self.cli.remove_container( self.proxy_name )
self.cli.remove_container( self.db_name )
def get_engine(self):
'''Return engine connection to postgres db'''
engine = create_engine('postgresql://postgres@localhost:5432/postgres')
return engine
|
<commit_before><commit_msg>Create docker Postgresql DB and proxy for testing with SQLAlchemy<commit_after>from sqlalchemy import create_engine
from sqlalchemy_utils.functions import create_database
from docker import Client
from docker.utils import create_host_config
class Database():
def __init__(self):
db_port = 5432
proxy_port = 5432
self.db_name = 'db'
self.proxy_name = 'db_proxy'
proxy_config = create_host_config(
links={'db':'db'}, port_bindings={db_port:('127.0.0.1',proxy_port)} )
self.db_kwargs = {'image': 'postgres:9.4', 'detach': True,
'name': self.db_name }
self.proxy_kwargs = { 'image': 'svendowideit/ambassador',
'host_config': proxy_config, 'detach': True,
'name': self.proxy_name, 'ports': [proxy_port] }
self.cli = Client(base_url='unix://var/run/docker.sock')
self.db_container = []
self.proxy_container = []
def __create_container(self, c_name, c_field, c_args ):
try:
c_res = self.cli.inspect_container( getattr(self,c_name) )
setattr(self, c_field, {'Id': c_res['Id'], 'Warnings': None })
except:
setattr(self, c_field,
self.cli.create_container(**getattr(self,c_args)) )
def create_app(self):
'''Create the database and proxy'''
self.__create_container( 'db_name', 'db_container', 'db_kwargs')
self.__create_container( 'proxy_name', 'proxy_container', 'proxy_kwargs')
def start_app(self):
self.cli.start( self.db_container )
self.cli.start( self.proxy_container )
def stop_app(self):
self.cli.stop( self.db_container )
self.cli.stop( self.proxy_container )
def remove_app(self):
self.cli.remove_container( self.proxy_name )
self.cli.remove_container( self.db_name )
def get_engine(self):
'''Return engine connection to postgres db'''
engine = create_engine('postgresql://postgres@localhost:5432/postgres')
return engine
|
|
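Intended usage of the Database helper, assuming a local Docker daemon on the default unix socket, network access to pull the two images, and a pre-2.0 SQLAlchemy that accepts raw SQL strings (a sketch, not code from the repo):

from mda.db import Database

db = Database()
db.create_app()    # create (or re-use) the postgres and ambassador containers
db.start_app()     # postgres ends up listening on 127.0.0.1:5432

engine = db.get_engine()
with engine.connect() as conn:
    print(conn.execute('SELECT version()').fetchone())

db.stop_app()
db.remove_app()    # clean the containers up again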
e3079745f37ff84b1be5511986b30d3ca285c15f
|
toyplot/qt.py
|
toyplot/qt.py
|
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
# Note: we prefer PyQt5 only because we've had issues embedding our HTML output
# with specific versions (Qt 4.8.7 on a Mac) of QWebView. Otherwise, the
# functionality is equivalent.
def show(canvas, title="Toyplot Figure"):
"""Display a canvas in a Qt window.
Parameters
----------
canvas: :class:`toyplot.canvas.Canvas`
The canvas to be displayed.
title: string, optional
Optional page title to be displayed in the window.
Notes
-----
The output HTML is generated using :func:`toyplot.html.render`.
"""
try:
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebKitWidgets import QWebView
except:
from PySide.QtGui import QApplication
from PySide.QtWebKit import QWebView
import sys
import toyplot.html
import xml.etree.ElementTree as xml
application = QApplication(sys.argv)
window = QWebView()
    window.setWindowTitle(title)
window.setHtml(xml.tostring(toyplot.html.render(canvas), method="html"))
window.show()
application.exec_()
|
Add an experimental backend to display figures in a standalone Qt window.
|
Add an experimental backend to display figures in a standalone Qt window.
|
Python
|
bsd-3-clause
|
cmorgan/toyplot,cmorgan/toyplot
|
Add an experimental backend to display figures in a standalone Qt window.
|
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
# Note: we prefer PyQt5 only because we've had issues embedding our HTML output
# with specific versions (Qt 4.8.7 on a Mac) of QWebView. Otherwise, the
# functionality is equivalent.
def show(canvas, title="Toyplot Figure"):
"""Display a canvas in a Qt window.
Parameters
----------
canvas: :class:`toyplot.canvas.Canvas`
The canvas to be displayed.
title: string, optional
Optional page title to be displayed in the window.
Notes
-----
The output HTML is generated using :func:`toyplot.html.render`.
"""
try:
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebKitWidgets import QWebView
except:
from PySide.QtGui import QApplication
from PySide.QtWebKit import QWebView
import sys
import toyplot.html
import xml.etree.ElementTree as xml
application = QApplication(sys.argv)
window = QWebView()
    window.setWindowTitle(title)
window.setHtml(xml.tostring(toyplot.html.render(canvas), method="html"))
window.show()
application.exec_()
|
<commit_before><commit_msg>Add an experimental backend to display figures in a standalone Qt window.<commit_after>
|
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
# Note: we prefer PyQt5 only because we've had issues embedding our HTML output
# with specific versions (Qt 4.8.7 on a Mac) of QWebView. Otherwise, the
# functionality is equivalent.
def show(canvas, title="Toyplot Figure"):
"""Display a canvas in a Qt window.
Parameters
----------
canvas: :class:`toyplot.canvas.Canvas`
The canvas to be displayed.
title: string, optional
Optional page title to be displayed in the window.
Notes
-----
The output HTML is generated using :func:`toyplot.html.render`.
"""
try:
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebKitWidgets import QWebView
except:
from PySide.QtGui import QApplication
from PySide.QtWebKit import QWebView
import sys
import toyplot.html
import xml.etree.ElementTree as xml
application = QApplication(sys.argv)
window = QWebView()
    window.setWindowTitle(title)
window.setHtml(xml.tostring(toyplot.html.render(canvas), method="html"))
window.show()
application.exec_()
|
Add an experimental backend to display figures in a standalone Qt window.# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
# Note: we prefer PyQt5 only because we've had issues embedding our HTML output
# with specific versions (Qt 4.8.7 on a Mac) of QWebView. Otherwise, the
# functionality is equivalent.
def show(canvas, title="Toyplot Figure"):
"""Display a canvas in a Qt window.
Parameters
----------
canvas: :class:`toyplot.canvas.Canvas`
The canvas to be displayed.
title: string, optional
Optional page title to be displayed in the window.
Notes
-----
The output HTML is generated using :func:`toyplot.html.render`.
"""
try:
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebKitWidgets import QWebView
except:
from PySide.QtGui import QApplication
from PySide.QtWebKit import QWebView
import sys
import toyplot.html
import xml.etree.ElementTree as xml
application = QApplication(sys.argv)
window = QWebView()
window.windowTitle = title
canvas, axes, mark = toyplot.plot(numpy.linspace(0, 1) ** 2)
window.setHtml(xml.tostring(toyplot.html.render(canvas), method="html"))
window.show()
application.exec_()
|
<commit_before><commit_msg>Add an experimental backend to display figures in a standalone Qt window.<commit_after># Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
# Note: we prefer PyQt5 only because we've had issues embedding our HTML output
# with specific versions (Qt 4.8.7 on a Mac) of QWebView. Otherwise, the
# functionality is equivalent.
def show(canvas, title="Toyplot Figure"):
"""Display a canvas in a Qt window.
Parameters
----------
canvas: :class:`toyplot.canvas.Canvas`
The canvas to be displayed.
title: string, optional
Optional page title to be displayed in the window.
Notes
-----
The output HTML is generated using :func:`toyplot.html.render`.
"""
try:
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebKitWidgets import QWebView
except:
from PySide.QtGui import QApplication
from PySide.QtWebKit import QWebView
import sys
import toyplot.html
import xml.etree.ElementTree as xml
application = QApplication(sys.argv)
window = QWebView()
    window.setWindowTitle(title)
window.setHtml(xml.tostring(toyplot.html.render(canvas), method="html"))
window.show()
application.exec_()
|
|
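A minimal usage sketch for this backend (assuming toyplot plus PyQt5 or PySide are installed); the quadratic data is just an example:

import numpy
import toyplot
import toyplot.qt

canvas, axes, mark = toyplot.plot(numpy.linspace(0, 1) ** 2)
toyplot.qt.show(canvas, title="Quadratic")   # blocks until the window closes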
c9205c0ef75a34e608594a3ef1b733e8a38fdd1a
|
command-line-template.py
|
command-line-template.py
|
from __future__ import print_function
import os
import sys
import argparse
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', help="Input file", type=argparse.FileType('r'))
parser.add_argument('-o', '--outfile', help="Output file",
default=sys.stdout, type=argparse.FileType('w'))
args = parser.parse_args(arguments)
print(args)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Add command line app example
|
Add command line app example
|
Python
|
mit
|
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
|
Add command line app example
|
from __future__ import print_function
import os
import sys
import argparse
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', help="Input file", type=argparse.FileType('r'))
parser.add_argument('-o', '--outfile', help="Output file",
default=sys.stdout, type=argparse.FileType('w'))
args = parser.parse_args(arguments)
print(args)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>Add command line app example<commit_after>
|
from __future__ import print_function
import os
import sys
import argparse
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', help="Input file", type=argparse.FileType('r'))
parser.add_argument('-o', '--outfile', help="Output file",
default=sys.stdout, type=argparse.FileType('w'))
args = parser.parse_args(arguments)
print(args)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
Add command line app examplefrom __future__ import print_function
import os
import sys
import argparse
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', help="Input file", type=argparse.FileType('r'))
parser.add_argument('-o', '--outfile', help="Output file",
default=sys.stdout, type=argparse.FileType('w'))
args = parser.parse_args(arguments)
print(args)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>Add command line app example<commit_after>from __future__ import print_function
import os
import sys
import argparse
def main(arguments):
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infile', help="Input file", type=argparse.FileType('r'))
parser.add_argument('-o', '--outfile', help="Output file",
default=sys.stdout, type=argparse.FileType('w'))
args = parser.parse_args(arguments)
print(args)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
|
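A self-contained demonstration of the template's interface, feeding the parser a temporary file; the argument values are illustrative:

import argparse
import sys
import tempfile

parser = argparse.ArgumentParser(description="demo of the template's interface")
parser.add_argument('infile', help="Input file", type=argparse.FileType('r'))
parser.add_argument('-o', '--outfile', help="Output file",
                    default=sys.stdout, type=argparse.FileType('w'))

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('hello\n')

args = parser.parse_args([tmp.name])    # same as: script.py input.txt
print(args.infile.name, args.outfile)   # outfile defaulted to stdout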
d73902fb9fde6e4dac8ed856bf0bf368d3599134
|
test_lxml.py
|
test_lxml.py
|
#!/usr/bin/python
import datetime as dt
import numpy as np
import pandas as pd
from lxml import etree
# test the lxml package in comparison to the standard ElementTree pkg
from crocus_forcing_nc import populate_forcing_nc
# try:
# import xml.etree.cElementTree as ET
# except ImportError:
# import xml.etree.ElementTree as ET
# dictionary pairing MET parameter names to Crocus parameter names
para_dict = {}# {'index': []}
index = []
tree = etree.parse(open('test.xml'))
root = tree.getroot()
for child_of_root in root:
print child_of_root.tag, child_of_root.attrib
# Find timeStamp-tag
for timestamp in tree.iter('timeStamp'):
# Iterate over each item of the timeStamp
for item in timestamp.findall('item'):
# Access its time stamp in the from-tag
tstamp = item.find('from').text
# Convert string to datetime
tstamp = dt.datetime.strptime(tstamp, '%Y-%m-%dT%H:%M:%S.000Z')
index.append(tstamp)
# Access each location-tag and store the station number in its id-tag
loc = item.find('location')
for locitem in loc:
# Get the station ID
stat_id = locitem.find('id').text
# Convert stat_id to an integer
stat_id = np.int(stat_id)
# Access the weatherElement
weather_e = locitem.find('weatherElement')
for weather_i in weather_e:
# Access the parameter ID
param_id = weather_i.find('id').text
if param_id not in para_dict.keys():
para_dict[param_id] = []
# Access the quality tag
quality = weather_i.find('quality').text
                # Convert the quality tag to an integer
quality = np.int(quality)
# Access the value
value = weather_i.find('value').text
# Convert the value to float
value = np.float(value)
# Print for testing
#print stat_id, tstamp, param_id, quality, value
para_dict[param_id].append(value)
print para_dict
# Now store everything in a pandas DataFrame...
df = pd.DataFrame(para_dict, index=index)
print df
populate_forcing_nc(df)
#df.to_hdf('test_ET.hdf', 'test_from_eklima')
#for elem in tree.iter(tag='value'):
# print elem.tag, elem.attrib, elem.text
|
Use lxml instead of ET
|
Use lxml instead of ET
|
Python
|
mit
|
kmunve/TSanalysis,kmunve/TSanalysis,kmunve/TSanalysis
|
Use lxml instead of ET
|
#!/usr/bin/python
import datetime as dt
import numpy as np
import pandas as pd
from lxml import etree
# test the lxml package in comparison to the standard ElementTree pkg
from crocus_forcing_nc import populate_forcing_nc
# try:
# import xml.etree.cElementTree as ET
# except ImportError:
# import xml.etree.ElementTree as ET
# dictionary pairing MET parameter names to Crocus parameter names
para_dict = {}# {'index': []}
index = []
tree = etree.parse(open('test.xml'))
root = tree.getroot()
for child_of_root in root:
print child_of_root.tag, child_of_root.attrib
# Find timeStamp-tag
for timestamp in tree.iter('timeStamp'):
# Iterate over each item of the timeStamp
for item in timestamp.findall('item'):
# Access its time stamp in the from-tag
tstamp = item.find('from').text
# Convert string to datetime
tstamp = dt.datetime.strptime(tstamp, '%Y-%m-%dT%H:%M:%S.000Z')
index.append(tstamp)
# Access each location-tag and store the station number in its id-tag
loc = item.find('location')
for locitem in loc:
# Get the station ID
stat_id = locitem.find('id').text
# Convert stat_id to an integer
stat_id = np.int(stat_id)
# Access the weatherElement
weather_e = locitem.find('weatherElement')
for weather_i in weather_e:
# Access the parameter ID
param_id = weather_i.find('id').text
if param_id not in para_dict.keys():
para_dict[param_id] = []
# Access the quality tag
quality = weather_i.find('quality').text
                # Convert the quality tag to an integer
quality = np.int(quality)
# Access the value
value = weather_i.find('value').text
# Convert the value to float
value = np.float(value)
# Print for testing
#print stat_id, tstamp, param_id, quality, value
para_dict[param_id].append(value)
print para_dict
# Now store everything in a pandas DataFrame...
df = pd.DataFrame(para_dict, index=index)
print df
populate_forcing_nc(df)
#df.to_hdf('test_ET.hdf', 'test_from_eklima')
#for elem in tree.iter(tag='value'):
# print elem.tag, elem.attrib, elem.text
|
<commit_before><commit_msg>Use lxml instead of ET<commit_after>
|
#!/usr/bin/python
import datetime as dt
import numpy as np
import pandas as pd
from lxml import etree
# test the lxml package in comparison to the standard ElementTree pkg
from crocus_forcing_nc import populate_forcing_nc
# try:
# import xml.etree.cElementTree as ET
# except ImportError:
# import xml.etree.ElementTree as ET
# dictionary pairing MET parameter names to Crocus parameter names
para_dict = {}# {'index': []}
index = []
tree = etree.parse(open('test.xml'))
root = tree.getroot()
for child_of_root in root:
print child_of_root.tag, child_of_root.attrib
# Find timeStamp-tag
for timestamp in tree.iter('timeStamp'):
# Iterate over each item of the timeStamp
for item in timestamp.findall('item'):
# Access its time stamp in the from-tag
tstamp = item.find('from').text
# Convert string to datetime
tstamp = dt.datetime.strptime(tstamp, '%Y-%m-%dT%H:%M:%S.000Z')
index.append(tstamp)
# Access each location-tag and store the station number in its id-tag
loc = item.find('location')
for locitem in loc:
# Get the station ID
stat_id = locitem.find('id').text
# Convert stat_id to an integer
stat_id = np.int(stat_id)
# Access the weatherElement
weather_e = locitem.find('weatherElement')
for weather_i in weather_e:
# Access the parameter ID
param_id = weather_i.find('id').text
if param_id not in para_dict.keys():
para_dict[param_id] = []
# Access the quality tag
quality = weather_i.find('quality').text
                # Convert the quality tag to an integer
quality = np.int(quality)
# Access the value
value = weather_i.find('value').text
# Convert the value to float
value = np.float(value)
# Print for testing
#print stat_id, tstamp, param_id, quality, value
para_dict[param_id].append(value)
print para_dict
# Now store everything in a pandas DataFrame...
df = pd.DataFrame(para_dict, index=index)
print df
populate_forcing_nc(df)
#df.to_hdf('test_ET.hdf', 'test_from_eklima')
#for elem in tree.iter(tag='value'):
# print elem.tag, elem.attrib, elem.text
|
Use lxml instead of ET#!/usr/bin/python
import datetime as dt
import numpy as np
import pandas as pd
from lxml import etree
# test the lxml package in comparison to the standard ElementTree pkg
from crocus_forcing_nc import populate_forcing_nc
# try:
# import xml.etree.cElementTree as ET
# except ImportError:
# import xml.etree.ElementTree as ET
# dictionary pairing MET parameter names to Crocus parameter names
para_dict = {}# {'index': []}
index = []
tree = etree.parse(open('test.xml'))
root = tree.getroot()
for child_of_root in root:
print child_of_root.tag, child_of_root.attrib
# Find timeStamp-tag
for timestamp in tree.iter('timeStamp'):
# Iterate over each item of the timeStamp
for item in timestamp.findall('item'):
# Access its time stamp in the from-tag
tstamp = item.find('from').text
# Convert string to datetime
tstamp = dt.datetime.strptime(tstamp, '%Y-%m-%dT%H:%M:%S.000Z')
index.append(tstamp)
# Access each location-tag and store the station number in its id-tag
loc = item.find('location')
for locitem in loc:
# Get the station ID
stat_id = locitem.find('id').text
# Convert stat_id to an integer
stat_id = np.int(stat_id)
# Access the weatherElement
weather_e = locitem.find('weatherElement')
for weather_i in weather_e:
# Access the parameter ID
param_id = weather_i.find('id').text
if param_id not in para_dict.keys():
para_dict[param_id] = []
# Access the quality tag
quality = weather_i.find('quality').text
                # Convert the quality tag to an integer
quality = np.int(quality)
# Access the value
value = weather_i.find('value').text
# Convert the value to float
value = np.float(value)
# Print for testing
#print stat_id, tstamp, param_id, quality, value
para_dict[param_id].append(value)
print para_dict
# Now store everything in a pandas DataFrame...
df = pd.DataFrame(para_dict, index=index)
print df
populate_forcing_nc(df)
#df.to_hdf('test_ET.hdf', 'test_from_eklima')
#for elem in tree.iter(tag='value'):
# print elem.tag, elem.attrib, elem.text
|
<commit_before><commit_msg>Use lxml instead of ET<commit_after>#!/usr/bin/python
import datetime as dt
import numpy as np
import pandas as pd
from lxml import etree
# test the lxml package in comparison to the standard ElementTree pkg
from crocus_forcing_nc import populate_forcing_nc
# try:
# import xml.etree.cElementTree as ET
# except ImportError:
# import xml.etree.ElementTree as ET
# dictionary pairing MET parameter names to Crocus parameter names
para_dict = {}# {'index': []}
index = []
tree = etree.parse(open('test.xml'))
root = tree.getroot()
for child_of_root in root:
print child_of_root.tag, child_of_root.attrib
# Find timeStamp-tag
for timestamp in tree.iter('timeStamp'):
# Iterate over each item of the timeStamp
for item in timestamp.findall('item'):
# Access its time stamp in the from-tag
tstamp = item.find('from').text
# Convert string to datetime
tstamp = dt.datetime.strptime(tstamp, '%Y-%m-%dT%H:%M:%S.000Z')
index.append(tstamp)
# Access each location-tag and store the station number in its id-tag
loc = item.find('location')
for locitem in loc:
# Get the station ID
stat_id = locitem.find('id').text
# Convert stat_id to an integer
stat_id = np.int(stat_id)
# Access the weatherElement
weather_e = locitem.find('weatherElement')
for weather_i in weather_e:
# Access the parameter ID
param_id = weather_i.find('id').text
if param_id not in para_dict.keys():
para_dict[param_id] = []
# Access the quality tag
quality = weather_i.find('quality').text
                # Convert the quality tag to an integer
quality = np.int(quality)
# Access the value
value = weather_i.find('value').text
# Convert the value to float
value = np.float(value)
# Print for testing
#print stat_id, tstamp, param_id, quality, value
para_dict[param_id].append(value)
print para_dict
# Now store everything in a pandas DataFrame...
df = pd.DataFrame(para_dict, index=index)
print df
populate_forcing_nc(df)
#df.to_hdf('test_ET.hdf', 'test_from_eklima')
#for elem in tree.iter(tag='value'):
# print elem.tag, elem.attrib, elem.text
|
|
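One portability note: np.int and np.float as used above were deprecated in NumPy 1.20 and removed in 1.24, so running the script on a current stack means switching to the builtins. A minimal sketch of the affected conversions, with sample strings standing in for the parsed XML text:

import numpy as np

stat_id_text, quality_text, value_text = "18700", "0", "3.5"  # sample values

stat_id = int(stat_id_text)     # was: np.int(stat_id)
quality = int(quality_text)     # was: np.int(quality)
value = float(value_text)       # was: np.float(value)

print(np.array([stat_id, quality, value]))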
96616feb46dc8342f742bc63f075477dc1b325e6
|
modules/data_generator.py
|
modules/data_generator.py
|
from __future__ import print_function
import requests
import json
class Table(object):
def __init__(self, x, y, width, height, name):
self.name = name
self.position_x = x
self.position_y = y
self.width = width
self.height = height
def json(self):
return json.dumps(self.__dict__)
class DataTableGenerator(object):
def __init__(self):
self.url = "http://bleepr.io/tables"
def get_tables(self):
r = requests.get(self.url)
print(r.status_code)
print(r.headers['content-type'])
return r.json()
def insert_table(self, table):
print(table.json())
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
r = requests.post(self.url, data=table.json(),
headers=headers)
if(r.status_code != 406):
raise Exception("POST response received was %s" % r.status_code)
def edit_table(self, id):
return
def delete_table(self, id):
r = requests.delete(self.url + "/" + str(id))
if (r.status_code != 204):
raise Exception("DELETE response received was %s" % r.status_code)
def clear_database(self):
data = self.get_tables()
for d in data:
self.delete_table(d["id"])
class CostumerDataGenerator(object):
def get_costumers(self):
return None
def insert_costumer(self, costumer):
return None
def edit_costumer(self, id):
return None
def delete_costumer(self, id):
return None
class CardsDataGenerator(object):
def get_cards(self):
return None
def insert_card(self, card):
return None
def edit_card(self, id):
return None
def delete_card(self, id):
return
def slip():
dt = DataTableGenerator()
dt.clear_database()
t = Table(45,249,80,485, "One")
dt.insert_table(t)
t = Table(300,60,180,100, "Two")
dt.insert_table(t)
t = Table(480,60,180,100, "Three")
dt.insert_table(t)
t = Table(220,440,180,100, "Four")
dt.insert_table(t)
t = Table(400,440,180,100, "Five")
dt.insert_table(t)
t = Table(300,250,180,100, "Six")
dt.insert_table(t)
def main():
slip()
if __name__ == "__main__":
main()
|
Add generator for tables plus SLIP room config
|
Add generator for tables plus SLIP room config
|
Python
|
mit
|
bleepr/bleepr-manage,bleepr/bleepr-manage
|
Add generator for tables plus SLIP room config
|
from __future__ import print_function
import requests
import json
class Table(object):
def __init__(self, x, y, width, height, name):
self.name = name
self.position_x = x
self.position_y = y
self.width = width
self.height = height
def json(self):
return json.dumps(self.__dict__)
class DataTableGenerator(object):
def __init__(self):
self.url = "http://bleepr.io/tables"
def get_tables(self):
r = requests.get(self.url)
print(r.status_code)
print(r.headers['content-type'])
return r.json()
def insert_table(self, table):
print(table.json())
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
r = requests.post(self.url, data=table.json(),
headers=headers)
if(r.status_code != 406):
raise Exception("POST response received was %s" % r.status_code)
def edit_table(self, id):
return
def delete_table(self, id):
r = requests.delete(self.url + "/" + str(id))
if (r.status_code != 204):
raise Exception("DELETE response received was %s" % r.status_code)
def clear_database(self):
data = self.get_tables()
for d in data:
self.delete_table(d["id"])
class CostumerDataGenerator(object):
def get_costumers(self):
return None
def insert_costumer(self, costumer):
return None
def edit_costumer(self, id):
return None
def delete_costumer(self, id):
return None
class CardsDataGenerator(object):
def get_cards(self):
return None
def insert_card(self, card):
return None
def edit_card(self, id):
return None
def delete_card(self, id):
return
def slip():
dt = DataTableGenerator()
dt.clear_database()
t = Table(45,249,80,485, "One")
dt.insert_table(t)
t = Table(300,60,180,100, "Two")
dt.insert_table(t)
t = Table(480,60,180,100, "Three")
dt.insert_table(t)
t = Table(220,440,180,100, "Four")
dt.insert_table(t)
t = Table(400,440,180,100, "Five")
dt.insert_table(t)
t = Table(300,250,180,100, "Six")
dt.insert_table(t)
def main():
slip()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add generator for tables plus SLIP room config<commit_after>
|
from __future__ import print_function
import requests
import json
class Table(object):
def __init__(self, x, y, width, height, name):
self.name = name
self.position_x = x
self.position_y = y
self.width = width
self.height = height
def json(self):
return json.dumps(self.__dict__)
class DataTableGenerator(object):
def __init__(self):
self.url = "http://bleepr.io/tables"
def get_tables(self):
r = requests.get(self.url)
print(r.status_code)
print(r.headers['content-type'])
return r.json()
def insert_table(self, table):
print(table.json())
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
r = requests.post(self.url, data=table.json(),
headers=headers)
if(r.status_code != 406):
raise Exception("POST response received was %s" % r.status_code)
def edit_table(self, id):
return
def delete_table(self, id):
r = requests.delete(self.url + "/" + str(id))
if (r.status_code != 204):
raise Exception("DELETE response received was %s" % r.status_code)
def clear_database(self):
data = self.get_tables()
for d in data:
self.delete_table(d["id"])
class CostumerDataGenerator(object):
def get_costumers(self):
return None
def insert_costumer(self, costumer):
return None
def edit_costumer(self, id):
return None
def delete_costumer(self, id):
return None
class CardsDataGenerator(object):
def get_cards(self):
return None
def insert_card(self, card):
return None
def edit_card(self, id):
return None
def delete_card(self, id):
return
def slip():
dt = DataTableGenerator()
dt.clear_database()
t = Table(45,249,80,485, "One")
dt.insert_table(t)
t = Table(300,60,180,100, "Two")
dt.insert_table(t)
t = Table(480,60,180,100, "Three")
dt.insert_table(t)
t = Table(220,440,180,100, "Four")
dt.insert_table(t)
t = Table(400,440,180,100, "Five")
dt.insert_table(t)
t = Table(300,250,180,100, "Six")
dt.insert_table(t)
def main():
slip()
if __name__ == "__main__":
main()
|
Add generator for tables plus SLIP room configfrom __future__ import print_function
import requests
import json
class Table(object):
def __init__(self, x, y, width, height, name):
self.name = name
self.position_x = x
self.position_y = y
self.width = width
self.height = height
def json(self):
return json.dumps(self.__dict__)
class DataTableGenerator(object):
def __init__(self):
self.url = "http://bleepr.io/tables"
def get_tables(self):
r = requests.get(self.url)
print(r.status_code)
print(r.headers['content-type'])
return r.json()
def insert_table(self, table):
print(table.json())
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
r = requests.post(self.url, data=table.json(),
headers=headers)
if(r.status_code != 406):
raise Exception("POST response received was %s" % r.status_code)
def edit_table(self, id):
return
def delete_table(self, id):
r = requests.delete(self.url + "/" + str(id))
if (r.status_code != 204):
raise Exception("DELETE response received was %s" % r.status_code)
def clear_database(self):
data = self.get_tables()
for d in data:
self.delete_table(d["id"])
class CostumerDataGenerator(object):
def get_costumers(self):
return None
def insert_costumer(self, costumer):
return None
def edit_costumer(self, id):
return None
def delete_costumer(self, id):
return None
class CardsDataGenerator(object):
def get_cards(self):
return None
def insert_card(self, card):
return None
def edit_card(self, id):
return None
def delete_card(self, id):
return
def slip():
dt = DataTableGenerator()
dt.clear_database()
t = Table(45,249,80,485, "One")
dt.insert_table(t)
t = Table(300,60,180,100, "Two")
dt.insert_table(t)
t = Table(480,60,180,100, "Three")
dt.insert_table(t)
t = Table(220,440,180,100, "Four")
dt.insert_table(t)
t = Table(400,440,180,100, "Five")
dt.insert_table(t)
t = Table(300,250,180,100, "Six")
dt.insert_table(t)
def main():
slip()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add generator for tables plus SLIP room config<commit_after>from __future__ import print_function
import requests
import json
class Table(object):
def __init__(self, x, y, width, height, name):
self.name = name
self.position_x = x
self.position_y = y
self.width = width
self.height = height
def json(self):
return json.dumps(self.__dict__)
class DataTableGenerator(object):
def __init__(self):
self.url = "http://bleepr.io/tables"
def get_tables(self):
r = requests.get(self.url)
print(r.status_code)
print(r.headers['content-type'])
return r.json()
def insert_table(self, table):
print(table.json())
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
r = requests.post(self.url, data=table.json(),
headers=headers)
        if r.status_code != 201:  # expect 201 Created; anything else is a failure
            raise Exception("POST response received was %s" % r.status_code)
def edit_table(self, id):
return
def delete_table(self, id):
r = requests.delete(self.url + "/" + str(id))
if (r.status_code != 204):
raise Exception("DELETE response received was %s" % r.status_code)
def clear_database(self):
data = self.get_tables()
for d in data:
self.delete_table(d["id"])
class CostumerDataGenerator(object):
def get_costumers(self):
return None
def insert_costumer(self, costumer):
return None
def edit_costumer(self, id):
return None
def delete_costumer(self, id):
return None
class CardsDataGenerator(object):
def get_cards(self):
return None
def insert_card(self, card):
return None
def edit_card(self, id):
return None
def delete_card(self, id):
return
def slip():
dt = DataTableGenerator()
dt.clear_database()
t = Table(45,249,80,485, "One")
dt.insert_table(t)
t = Table(300,60,180,100, "Two")
dt.insert_table(t)
t = Table(480,60,180,100, "Three")
dt.insert_table(t)
t = Table(220,440,180,100, "Four")
dt.insert_table(t)
t = Table(400,440,180,100, "Five")
dt.insert_table(t)
t = Table(300,250,180,100, "Six")
dt.insert_table(t)
def main():
slip()
if __name__ == "__main__":
main()
|
|
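A minimal usage sketch of the generator pattern in the record above: wipe the endpoint, reseed the six SLIP-room tables, then list what the service reports back. The module name slip_seed is hypothetical (the record never names its module), and the sketch assumes the bleepr.io endpoint really returns a JSON list of objects with "id" and "name" keys, as get_tables() expects.

from __future__ import print_function
from slip_seed import DataTableGenerator, slip  # hypothetical module name

def reseed_and_report():
    gen = DataTableGenerator()
    slip()  # clears the endpoint and inserts the six SLIP-room tables
    for row in gen.get_tables():  # decoded JSON list from the GET
        print(row["id"], row["name"])

if __name__ == "__main__":
    reseed_and_report()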
4ff9ac54cdcc5ed4fb63dfe2b8913c656464b0c7
|
pitchfork/context_functions.py
|
pitchfork/context_functions.py
|
import re
def utility_processor():
def unslug(string):
return re.sub('_', ' ', string)
def parse_field_data(value):
choices = re.sub('\r\n', ',', value)
return choices.split(',')
def slugify(data):
temp_string = re.sub(' +', ' ', str(data.strip()))
return re.sub(' ', '_', temp_string)
return dict(
parse_field_data=parse_field_data,
unslug=unslug,
slugify=slugify
)
|
Move utility processors out of init file
|
Move utility processors out of init file
|
Python
|
apache-2.0
|
rackerlabs/pitchfork,rackerlabs/pitchfork,oldarmyc/pitchfork,oldarmyc/pitchfork,oldarmyc/pitchfork,rackerlabs/pitchfork
|
Move utility processors out of init file
|
import re
def utility_processor():
def unslug(string):
return re.sub('_', ' ', string)
def parse_field_data(value):
choices = re.sub('\r\n', ',', value)
return choices.split(',')
def slugify(data):
temp_string = re.sub(' +', ' ', str(data.strip()))
return re.sub(' ', '_', temp_string)
return dict(
parse_field_data=parse_field_data,
unslug=unslug,
slugify=slugify
)
|
<commit_before><commit_msg>Move utility processors out of init file<commit_after>
|
import re
def utility_processor():
def unslug(string):
return re.sub('_', ' ', string)
def parse_field_data(value):
choices = re.sub('\r\n', ',', value)
return choices.split(',')
def slugify(data):
temp_string = re.sub(' +', ' ', str(data.strip()))
return re.sub(' ', '_', temp_string)
return dict(
parse_field_data=parse_field_data,
unslug=unslug,
slugify=slugify
)
|
Move utility processors out of init file
import re
def utility_processor():
def unslug(string):
return re.sub('_', ' ', string)
def parse_field_data(value):
choices = re.sub('\r\n', ',', value)
return choices.split(',')
def slugify(data):
temp_string = re.sub(' +', ' ', str(data.strip()))
return re.sub(' ', '_', temp_string)
return dict(
parse_field_data=parse_field_data,
unslug=unslug,
slugify=slugify
)
|
<commit_before><commit_msg>Move utility processors out of init file<commit_after>
import re
def utility_processor():
def unslug(string):
return re.sub('_', ' ', string)
def parse_field_data(value):
choices = re.sub('\r\n', ',', value)
return choices.split(',')
def slugify(data):
temp_string = re.sub(' +', ' ', str(data.strip()))
return re.sub(' ', '_', temp_string)
return dict(
parse_field_data=parse_field_data,
unslug=unslug,
slugify=slugify
)
|
|
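The utility_processor in the record above has exactly the shape Flask expects from a context processor: a callable returning a dict of helpers to expose to templates. A minimal sketch of registering it, assuming the function is importable from a module named context_functions (the record's file path suggests pitchfork/context_functions.py in its own tree):

from flask import Flask
from context_functions import utility_processor  # assumed import path

app = Flask(__name__)
# Makes slugify/unslug/parse_field_data callable from Jinja templates,
# e.g. {{ slugify(page.title) }}.
app.context_processor(utility_processor)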
e2dd78c608660545529f63f82c7209b2f49eb5a7
|
tests/test_compile_samples.py
|
tests/test_compile_samples.py
|
import os.path
import pytest
import rain.compiler as C
def ls(*path):
path = os.path.join(*path)
for file in os.listdir(path):
yield os.path.join(path, file)
def lsrn(*path, recurse=False):
for file in ls(*path):
if os.path.isfile(file) and file.endswith('.rn') and not file.endswith('_pkg.rn'):
yield file
elif recurse and os.path.isdir(file):
yield from lsrn(file, recurse=recurse)
@pytest.mark.parametrize('src', lsrn('samples', recurse=True))
def test_sample(src):
comp = C.get_compiler(src, main=True, quiet=True)
comp.goodies()
comp.compile()
|
Add compilation tests for all samples
|
tests: Add compilation tests for all samples
|
Python
|
mit
|
scizzorz/rain,philipdexter/rain,scizzorz/rain,scizzorz/rain,philipdexter/rain,philipdexter/rain,scizzorz/rain,philipdexter/rain
|
tests: Add compilation tests for all samples
|
import os.path
import pytest
import rain.compiler as C
def ls(*path):
path = os.path.join(*path)
for file in os.listdir(path):
yield os.path.join(path, file)
def lsrn(*path, recurse=False):
for file in ls(*path):
if os.path.isfile(file) and file.endswith('.rn') and not file.endswith('_pkg.rn'):
yield file
elif recurse and os.path.isdir(file):
yield from lsrn(file, recurse=recurse)
@pytest.mark.parametrize('src', lsrn('samples', recurse=True))
def test_sample(src):
comp = C.get_compiler(src, main=True, quiet=True)
comp.goodies()
comp.compile()
|
<commit_before><commit_msg>tests: Add compilation tests for all samples<commit_after>
|
import os.path
import pytest
import rain.compiler as C
def ls(*path):
path = os.path.join(*path)
for file in os.listdir(path):
yield os.path.join(path, file)
def lsrn(*path, recurse=False):
for file in ls(*path):
if os.path.isfile(file) and file.endswith('.rn') and not file.endswith('_pkg.rn'):
yield file
elif recurse and os.path.isdir(file):
yield from lsrn(file, recurse=recurse)
@pytest.mark.parametrize('src', lsrn('samples', recurse=True))
def test_sample(src):
comp = C.get_compiler(src, main=True, quiet=True)
comp.goodies()
comp.compile()
|
tests: Add compilation tests for all samples
import os.path
import pytest
import rain.compiler as C
def ls(*path):
path = os.path.join(*path)
for file in os.listdir(path):
yield os.path.join(path, file)
def lsrn(*path, recurse=False):
for file in ls(*path):
if os.path.isfile(file) and file.endswith('.rn') and not file.endswith('_pkg.rn'):
yield file
elif recurse and os.path.isdir(file):
yield from lsrn(file, recurse=recurse)
@pytest.mark.parametrize('src', lsrn('samples', recurse=True))
def test_sample(src):
comp = C.get_compiler(src, main=True, quiet=True)
comp.goodies()
comp.compile()
|
<commit_before><commit_msg>tests: Add compilation tests for all samples<commit_after>
import os.path
import pytest
import rain.compiler as C
def ls(*path):
path = os.path.join(*path)
for file in os.listdir(path):
yield os.path.join(path, file)
def lsrn(*path, recurse=False):
for file in ls(*path):
if os.path.isfile(file) and file.endswith('.rn') and not file.endswith('_pkg.rn'):
yield file
elif recurse and os.path.isdir(file):
yield from lsrn(file, recurse=recurse)
@pytest.mark.parametrize('src', lsrn('samples', recurse=True))
def test_sample(src):
comp = C.get_compiler(src, main=True, quiet=True)
comp.goodies()
comp.compile()
|
|
ee44a827787cc66b64b6507601adc374e0b087de
|
examples/ctf_demo.py
|
examples/ctf_demo.py
|
"""
This demonstrates the `CtfEditor` widget.
To use: right-click in the window to bring up a context menu. Once you've added
a color or opacity, you can drag them around by just clicking on them. The
colors at the end points are editable, but cannot be removed.
"""
from traits.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
from enable.api import ComponentEditor
from enaml.colors import Color
from enaml.qt.qt_application import QtApplication
from ensemble.ctf.editor import CtfEditor
from traits.api import HasTraits, Instance
from traitsui.api import View, UItem
import traits_enaml
with traits_enaml.imports():
from enaml.widgets.api import FileDialogEx
from enaml.widgets.color_dialog import ColorDialog
def get_color(starting_color=None):
""" Show a color picker to the user and return the color which is selected.
"""
def color_as_tuple(color):
return (color.red/255., color.green/255., color.blue/255.)
def tuple_as_color(tup):
r, g, b = tup
return Color(int(r * 255), int(g * 255), int(b * 255), 255)
dlg_kwargs = {'show_alpha': False}
if starting_color:
dlg_kwargs['current_color'] = tuple_as_color(starting_color)
return color_as_tuple(ColorDialog.get_color(**dlg_kwargs))
def get_filename(action='save'):
function = getattr(FileDialogEx, 'get_' + action + '_file_name')
return function()
class Demo(HasTraits):
ctf = Instance(CtfEditor)
traits_view = View(
UItem('ctf',
editor=ComponentEditor(),
style='custom'),
width=450,
height=150,
title="Color Transfer Function Editor",
resizable=True,
)
if __name__ == "__main__":
ctf = CtfEditor(prompt_color_selection=get_color,
prompt_file_selection=get_filename)
demo = Demo(ctf=ctf)
app = QtApplication()
demo.edit_traits()
app.start()
|
Add a demo script for the CtfEditor.
|
Add a demo script for the CtfEditor.
|
Python
|
bsd-3-clause
|
dmsurti/ensemble
|
Add a demo script for the CtfEditor.
|
"""
This demonstrates the `CtfEditor` widget.
To use: right-click in the window to bring up a context menu. Once you've added
a color or opacity, you can drag them around by just clicking on them. The
colors at the end points are editable, but cannot be removed.
"""
from traits.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
from enable.api import ComponentEditor
from enaml.colors import Color
from enaml.qt.qt_application import QtApplication
from ensemble.ctf.editor import CtfEditor
from traits.api import HasTraits, Instance
from traitsui.api import View, UItem
import traits_enaml
with traits_enaml.imports():
from enaml.widgets.api import FileDialogEx
from enaml.widgets.color_dialog import ColorDialog
def get_color(starting_color=None):
""" Show a color picker to the user and return the color which is selected.
"""
def color_as_tuple(color):
return (color.red/255., color.green/255., color.blue/255.)
def tuple_as_color(tup):
r, g, b = tup
return Color(int(r * 255), int(g * 255), int(b * 255), 255)
dlg_kwargs = {'show_alpha': False}
if starting_color:
dlg_kwargs['current_color'] = tuple_as_color(starting_color)
return color_as_tuple(ColorDialog.get_color(**dlg_kwargs))
def get_filename(action='save'):
function = getattr(FileDialogEx, 'get_' + action + '_file_name')
return function()
class Demo(HasTraits):
ctf = Instance(CtfEditor)
traits_view = View(
UItem('ctf',
editor=ComponentEditor(),
style='custom'),
width=450,
height=150,
title="Color Transfer Function Editor",
resizable=True,
)
if __name__ == "__main__":
ctf = CtfEditor(prompt_color_selection=get_color,
prompt_file_selection=get_filename)
demo = Demo(ctf=ctf)
app = QtApplication()
demo.edit_traits()
app.start()
|
<commit_before><commit_msg>Add a demo script for the CtfEditor.<commit_after>
|
"""
This demonstrates the `CtfEditor` widget.
To use: right-click in the window to bring up a context menu. Once you've added
a color or opacity, you can drag them around by just clicking on them. The
colors at the end points are editable, but cannot be removed.
"""
from traits.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
from enable.api import ComponentEditor
from enaml.colors import Color
from enaml.qt.qt_application import QtApplication
from ensemble.ctf.editor import CtfEditor
from traits.api import HasTraits, Instance
from traitsui.api import View, UItem
import traits_enaml
with traits_enaml.imports():
from enaml.widgets.api import FileDialogEx
from enaml.widgets.color_dialog import ColorDialog
def get_color(starting_color=None):
""" Show a color picker to the user and return the color which is selected.
"""
def color_as_tuple(color):
return (color.red/255., color.green/255., color.blue/255.)
def tuple_as_color(tup):
r, g, b = tup
return Color(int(r * 255), int(g * 255), int(b * 255), 255)
dlg_kwargs = {'show_alpha': False}
if starting_color:
dlg_kwargs['current_color'] = tuple_as_color(starting_color)
return color_as_tuple(ColorDialog.get_color(**dlg_kwargs))
def get_filename(action='save'):
function = getattr(FileDialogEx, 'get_' + action + '_file_name')
return function()
class Demo(HasTraits):
ctf = Instance(CtfEditor)
traits_view = View(
UItem('ctf',
editor=ComponentEditor(),
style='custom'),
width=450,
height=150,
title="Color Transfer Function Editor",
resizable=True,
)
if __name__ == "__main__":
ctf = CtfEditor(prompt_color_selection=get_color,
prompt_file_selection=get_filename)
demo = Demo(ctf=ctf)
app = QtApplication()
demo.edit_traits()
app.start()
|
Add a demo script for the CtfEditor.
"""
This demonstrates the `CtfEditor` widget.
To use: right-click in the window to bring up a context menu. Once you've added
a color or opacity, you can drag them around by just clicking on them. The
colors at the end points are editable, but cannot be removed.
"""
from traits.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
from enable.api import ComponentEditor
from enaml.colors import Color
from enaml.qt.qt_application import QtApplication
from ensemble.ctf.editor import CtfEditor
from traits.api import HasTraits, Instance
from traitsui.api import View, UItem
import traits_enaml
with traits_enaml.imports():
from enaml.widgets.api import FileDialogEx
from enaml.widgets.color_dialog import ColorDialog
def get_color(starting_color=None):
""" Show a color picker to the user and return the color which is selected.
"""
def color_as_tuple(color):
return (color.red/255., color.green/255., color.blue/255.)
def tuple_as_color(tup):
r, g, b = tup
return Color(int(r * 255), int(g * 255), int(b * 255), 255)
dlg_kwargs = {'show_alpha': False}
if starting_color:
dlg_kwargs['current_color'] = tuple_as_color(starting_color)
return color_as_tuple(ColorDialog.get_color(**dlg_kwargs))
def get_filename(action='save'):
function = getattr(FileDialogEx, 'get_' + action + '_file_name')
return function()
class Demo(HasTraits):
ctf = Instance(CtfEditor)
traits_view = View(
UItem('ctf',
editor=ComponentEditor(),
style='custom'),
width=450,
height=150,
title="Color Transfer Function Editor",
resizable=True,
)
if __name__ == "__main__":
ctf = CtfEditor(prompt_color_selection=get_color,
prompt_file_selection=get_filename)
demo = Demo(ctf=ctf)
app = QtApplication()
demo.edit_traits()
app.start()
|
<commit_before><commit_msg>Add a demo script for the CtfEditor.<commit_after>
"""
This demonstrates the `CtfEditor` widget.
To use: right-click in the window to bring up a context menu. Once you've added
a color or opacity, you can drag them around by just clicking on them. The
colors at the end points are editable, but cannot be removed.
"""
from traits.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
from enable.api import ComponentEditor
from enaml.colors import Color
from enaml.qt.qt_application import QtApplication
from ensemble.ctf.editor import CtfEditor
from traits.api import HasTraits, Instance
from traitsui.api import View, UItem
import traits_enaml
with traits_enaml.imports():
from enaml.widgets.api import FileDialogEx
from enaml.widgets.color_dialog import ColorDialog
def get_color(starting_color=None):
""" Show a color picker to the user and return the color which is selected.
"""
def color_as_tuple(color):
return (color.red/255., color.green/255., color.blue/255.)
def tuple_as_color(tup):
r, g, b = tup
return Color(int(r * 255), int(g * 255), int(b * 255), 255)
dlg_kwargs = {'show_alpha': False}
if starting_color:
dlg_kwargs['current_color'] = tuple_as_color(starting_color)
return color_as_tuple(ColorDialog.get_color(**dlg_kwargs))
def get_filename(action='save'):
function = getattr(FileDialogEx, 'get_' + action + '_file_name')
return function()
class Demo(HasTraits):
ctf = Instance(CtfEditor)
traits_view = View(
UItem('ctf',
editor=ComponentEditor(),
style='custom'),
width=450,
height=150,
title="Color Transfer Function Editor",
resizable=True,
)
if __name__ == "__main__":
ctf = CtfEditor(prompt_color_selection=get_color,
prompt_file_selection=get_filename)
demo = Demo(ctf=ctf)
app = QtApplication()
demo.edit_traits()
app.start()
|
|
7ce77190028a55bf79f4fe7cbe9c5e7448422a8d
|
duplicate_questions/data/make_dataset.py
|
duplicate_questions/data/make_dataset.py
|
# -*- coding: utf-8 -*-
import os
import click
import logging
from dotenv import find_dotenv, load_dotenv
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
|
Add temporary file for dataset creation code
|
Add temporary file for dataset creation code
|
Python
|
mit
|
nelson-liu/paraphrase-id-tensorflow,nelson-liu/paraphrase-id-tensorflow
|
Add temporary file for dataset creation code
|
# -*- coding: utf-8 -*-
import os
import click
import logging
from dotenv import find_dotenv, load_dotenv
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
|
<commit_before><commit_msg>Add temporary file for dataset creation code<commit_after>
|
# -*- coding: utf-8 -*-
import os
import click
import logging
from dotenv import find_dotenv, load_dotenv
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
|
Add temporary file for dataset creation code
# -*- coding: utf-8 -*-
import os
import click
import logging
from dotenv import find_dotenv, load_dotenv
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
|
<commit_before><commit_msg>Add temporary file for dataset creation code<commit_after>
# -*- coding: utf-8 -*-
import os
import click
import logging
from dotenv import find_dotenv, load_dotenv
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
|
|
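The click stub in the record above already defines a complete two-argument CLI. A minimal sketch of exercising it without a shell, using click's test runner; it assumes the stub is importable as make_dataset, and note that the input path must exist on disk because of click.Path(exists=True):

from click.testing import CliRunner
from make_dataset import main  # assumed import path

runner = CliRunner()
with runner.isolated_filesystem():
    open("in.csv", "w").close()  # satisfy click.Path(exists=True)
    result = runner.invoke(main, ["in.csv", "out.csv"])
    assert result.exit_code == 0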
1ac9e6ccaae2086079f3db8cc7d061e54b0ecc7f
|
openquake/baselib/utf8.py
|
openquake/baselib/utf8.py
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
try:
import __builtin__
except ImportError:
# Python 3
import builtins as __builtin__
def print(*args, **kwargs):
    # convert every argument, not just the last one, before delegating
    conv_args = [s.encode('utf-8').decode(sys.stdout.encoding, 'ignore')
                 for s in args]
    return __builtin__.print(*conv_args, **kwargs)
|
Add an UTF-8 helper which overloads print()
|
Add an UTF-8 helper which overloads print()
|
Python
|
agpl-3.0
|
gem/oq-engine,gem/oq-engine,gem/oq-hazardlib,gem/oq-engine,gem/oq-engine,gem/oq-hazardlib,gem/oq-hazardlib,gem/oq-engine
|
Add an UTF-8 helper which overloads print()
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
try:
import __builtin__
except ImportError:
# Python 3
import builtins as __builtin__
def print(*args, **kwargs):
    # convert every argument, not just the last one, before delegating
    conv_args = [s.encode('utf-8').decode(sys.stdout.encoding, 'ignore')
                 for s in args]
    return __builtin__.print(*conv_args, **kwargs)
|
<commit_before><commit_msg>Add an UTF-8 helper which overloads print()<commit_after>
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
try:
import __builtin__
except ImportError:
# Python 3
import builtins as __builtin__
def print(*args, **kwargs):
    # convert every argument, not just the last one, before delegating
    conv_args = [s.encode('utf-8').decode(sys.stdout.encoding, 'ignore')
                 for s in args]
    return __builtin__.print(*conv_args, **kwargs)
|
Add an UTF-8 helper which overloads print()
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
try:
import __builtin__
except ImportError:
# Python 3
import builtins as __builtin__
def print(*args, **kwargs):
    # convert every argument, not just the last one, before delegating
    conv_args = [s.encode('utf-8').decode(sys.stdout.encoding, 'ignore')
                 for s in args]
    return __builtin__.print(*conv_args, **kwargs)
|
<commit_before><commit_msg>Add an UTF-8 helper which overloads print()<commit_after>
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
try:
import __builtin__
except ImportError:
# Python 3
import builtins as __builtin__
def print(*args, **kwargs):
    # convert every argument, not just the last one, before delegating
    conv_args = [s.encode('utf-8').decode(sys.stdout.encoding, 'ignore')
                 for s in args]
    return __builtin__.print(*conv_args, **kwargs)
|
|
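With the accumulation fix above, the overloaded print converts each argument through the active stdout encoding, dropping anything the console cannot represent instead of raising UnicodeEncodeError. A minimal usage sketch, assuming the helper is importable as openquake.baselib.utf8 (the record's file path) and that sys.stdout.encoding is set:

from __future__ import print_function  # makes "print" importable on Python 2
from openquake.baselib.utf8 import print  # shadows the builtin in this module

# On an ASCII console the accented characters are silently dropped.
print(u"Caf\xe9", u"stra\xdfe")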
f876c410ab39bd348f79ed2a256c09edd4225c56
|
odo/backends/tests/test_dask_array.py
|
odo/backends/tests/test_dask_array.py
|
from __future__ import absolute_import, division, print_function
from odo.backends.dask_array import append, Array, merge
from dask.array.core import insert_to_ooc
from dask import core
from odo import convert, into
from odo.utils import tmpfile
import numpy as np
import bcolz
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_convert():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert isinstance(d, Array)
def test_convert_to_numpy_array():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
x2 = convert(np.ndarray, d)
assert eq(x, x2)
def test_append_to_array():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(0, 30), dtype=x.dtype)
append(b, a)
assert eq(b[:], x)
with tmpfile('hdf5') as fn:
h = into(fn+'::/data', a)
assert eq(h[:], x)
h.file.close()
def test_into_inplace():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(20, 30), dtype=x.dtype)
append(b, a, inplace=True)
assert eq(b[:], x)
def test_insert_to_ooc():
x = np.arange(600).reshape((20, 30))
y = np.empty(shape=x.shape, dtype=x.dtype)
a = convert(Array, x, blockshape=(4, 5))
dsk = insert_to_ooc(y, a)
core.get(merge(dsk, a.dask), list(dsk.keys()))
assert eq(y, x)
def test__array__():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert eq(x, np.array(d))
|
Migrate tests for dask array conversions from dask package.
|
Migrate tests for dask array conversions from dask package.
|
Python
|
bsd-3-clause
|
Dannnno/odo,Dannnno/odo,ywang007/odo,ContinuumIO/odo,ContinuumIO/odo,ywang007/odo,cpcloud/odo,blaze/odo,cowlicks/odo,blaze/odo,alexmojaki/odo,quantopian/odo,alexmojaki/odo,cpcloud/odo,cowlicks/odo,quantopian/odo
|
Migrate tests for dask array conversions from dask package.
|
from __future__ import absolute_import, division, print_function
from odo.backends.dask_array import append, Array, merge
from dask.array.core import insert_to_ooc
from dask import core
from odo import convert, into
from odo.utils import tmpfile
import numpy as np
import bcolz
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_convert():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert isinstance(d, Array)
def test_convert_to_numpy_array():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
x2 = convert(np.ndarray, d)
assert eq(x, x2)
def test_append_to_array():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(0, 30), dtype=x.dtype)
append(b, a)
assert eq(b[:], x)
with tmpfile('hdf5') as fn:
h = into(fn+'::/data', a)
assert eq(h[:], x)
h.file.close()
def test_into_inplace():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(20, 30), dtype=x.dtype)
append(b, a, inplace=True)
assert eq(b[:], x)
def test_insert_to_ooc():
x = np.arange(600).reshape((20, 30))
y = np.empty(shape=x.shape, dtype=x.dtype)
a = convert(Array, x, blockshape=(4, 5))
dsk = insert_to_ooc(y, a)
core.get(merge(dsk, a.dask), list(dsk.keys()))
assert eq(y, x)
def test__array__():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert eq(x, np.array(d))
|
<commit_before><commit_msg>Migrate tests for dask array conversions from dask package.<commit_after>
|
from __future__ import absolute_import, division, print_function
from odo.backends.dask_array import append, Array, merge
from dask.array.core import insert_to_ooc
from dask import core
from odo import convert, into
from odo.utils import tmpfile
import numpy as np
import bcolz
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_convert():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert isinstance(d, Array)
def test_convert_to_numpy_array():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
x2 = convert(np.ndarray, d)
assert eq(x, x2)
def test_append_to_array():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(0, 30), dtype=x.dtype)
append(b, a)
assert eq(b[:], x)
with tmpfile('hdf5') as fn:
h = into(fn+'::/data', a)
assert eq(h[:], x)
h.file.close()
def test_into_inplace():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(20, 30), dtype=x.dtype)
append(b, a, inplace=True)
assert eq(b[:], x)
def test_insert_to_ooc():
x = np.arange(600).reshape((20, 30))
y = np.empty(shape=x.shape, dtype=x.dtype)
a = convert(Array, x, blockshape=(4, 5))
dsk = insert_to_ooc(y, a)
core.get(merge(dsk, a.dask), list(dsk.keys()))
assert eq(y, x)
def test__array__():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert eq(x, np.array(d))
|
Migrate tests for dask array conversions from dask package.
from __future__ import absolute_import, division, print_function
from odo.backends.dask_array import append, Array, merge
from dask.array.core import insert_to_ooc
from dask import core
from odo import convert, into
from odo.utils import tmpfile
import numpy as np
import bcolz
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_convert():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert isinstance(d, Array)
def test_convert_to_numpy_array():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
x2 = convert(np.ndarray, d)
assert eq(x, x2)
def test_append_to_array():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(0, 30), dtype=x.dtype)
append(b, a)
assert eq(b[:], x)
with tmpfile('hdf5') as fn:
h = into(fn+'::/data', a)
assert eq(h[:], x)
h.file.close()
def test_into_inplace():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(20, 30), dtype=x.dtype)
append(b, a, inplace=True)
assert eq(b[:], x)
def test_insert_to_ooc():
x = np.arange(600).reshape((20, 30))
y = np.empty(shape=x.shape, dtype=x.dtype)
a = convert(Array, x, blockshape=(4, 5))
dsk = insert_to_ooc(y, a)
core.get(merge(dsk, a.dask), list(dsk.keys()))
assert eq(y, x)
def test__array__():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert eq(x, np.array(d))
|
<commit_before><commit_msg>Migrate tests for dask array conversions from dask package.<commit_after>
from __future__ import absolute_import, division, print_function
from odo.backends.dask_array import append, Array, merge
from dask.array.core import insert_to_ooc
from dask import core
from odo import convert, into
from odo.utils import tmpfile
import numpy as np
import bcolz
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
c = c.all()
return c
def test_convert():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert isinstance(d, Array)
def test_convert_to_numpy_array():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
x2 = convert(np.ndarray, d)
assert eq(x, x2)
def test_append_to_array():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(0, 30), dtype=x.dtype)
append(b, a)
assert eq(b[:], x)
with tmpfile('hdf5') as fn:
h = into(fn+'::/data', a)
assert eq(h[:], x)
h.file.close()
def test_into_inplace():
x = np.arange(600).reshape((20, 30))
a = into(Array, x, blockshape=(4, 5))
b = bcolz.zeros(shape=(20, 30), dtype=x.dtype)
append(b, a, inplace=True)
assert eq(b[:], x)
def test_insert_to_ooc():
x = np.arange(600).reshape((20, 30))
y = np.empty(shape=x.shape, dtype=x.dtype)
a = convert(Array, x, blockshape=(4, 5))
dsk = insert_to_ooc(y, a)
core.get(merge(dsk, a.dask), list(dsk.keys()))
assert eq(y, x)
def test__array__():
x = np.arange(600).reshape((20, 30))
d = convert(Array, x, blockshape=(4, 5))
assert eq(x, np.array(d))
|
|
264bd5630c634fd37343cf5ade506aa3447f4751
|
ftfy/test_unicode.py
|
ftfy/test_unicode.py
|
# -*- coding: utf-8 -*-
from ftfy import fix_bad_encoding, WINDOWS_1252_GREMLINS
import unicodedata
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in xrange(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn'):
garble = char.encode('utf-8').decode('latin-1')
assert fix_bad_encoding(garble) == char
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
print phrase
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
assert fix_bad_encoding(text) == text, text
|
Move this test from metanl.
|
Move this test from metanl.
|
Python
|
mit
|
LuminosoInsight/python-ftfy
|
Move this test from metanl.
|
# -*- coding: utf-8 -*-
from ftfy import fix_bad_encoding, WINDOWS_1252_GREMLINS
import unicodedata
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in xrange(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn'):
garble = char.encode('utf-8').decode('latin-1')
assert fix_bad_encoding(garble) == char
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
print phrase
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
assert fix_bad_encoding(text) == text, text
|
<commit_before><commit_msg>Move this test from metanl.<commit_after>
|
# -*- coding: utf-8 -*-
from ftfy import fix_bad_encoding, WINDOWS_1252_GREMLINS
import unicodedata
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in xrange(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn'):
garble = char.encode('utf-8').decode('latin-1')
assert fix_bad_encoding(garble) == char
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
print phrase
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
assert fix_bad_encoding(text) == text, text
|
Move this test from metanl.
# -*- coding: utf-8 -*-
from ftfy import fix_bad_encoding, WINDOWS_1252_GREMLINS
import unicodedata
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in xrange(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn'):
garble = char.encode('utf-8').decode('latin-1')
assert fix_bad_encoding(garble) == char
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
print phrase
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
assert fix_bad_encoding(text) == text, text
|
<commit_before><commit_msg>Move this test from metanl.<commit_after>
# -*- coding: utf-8 -*-
from ftfy import fix_bad_encoding, WINDOWS_1252_GREMLINS
import unicodedata
# Most single-character strings which have been misencoded should be restored.
def test_all_bmp_characters():
for index in xrange(0xa0, 0xfffd):
char = unichr(index)
# Exclude code points that are not assigned
if unicodedata.category(char) not in ('Co', 'Cn'):
garble = char.encode('utf-8').decode('latin-1')
assert fix_bad_encoding(garble) == char
phrases = [
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u201CI'm not such a fan of Charlotte Brontë\u2026\u201D",
u"\u2039ALLÍ ESTÁ\u203A",
u"\u2014ALLÍ ESTÁ\u2014",
u"AHÅ™, the new sofa from IKEA®",
#u"\u2014a radius of 10 Å\u2014",
]
# These phrases should not be erroneously "fixed"
def test_valid_phrases():
for phrase in phrases:
print phrase
yield check_phrase, phrase
# make it not just confirm based on the opening punctuation
yield check_phrase, phrase[1:]
def check_phrase(text):
assert fix_bad_encoding(text) == text, text
|
|
dbfef6d04bdb8031a5f47e27e1370dfa6b797e71
|
python/uglyNumberII.py
|
python/uglyNumberII.py
|
# https://leetcode.com/problems/ugly-number-ii/
class Solution(object):
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
ugly = [1] * n
i2 = i3 = i5 = -1
x = v2 = v3 = v5 = 1
for k in xrange(n):
x = min(v2, v3, v5)
ugly[k] = x
if x == v2:
i2 += 1
v2 = ugly[i2] * 2
if x == v3:
i3 += 1
v3 = ugly[i3] * 3
if x == v5:
i5 += 1
v5 = ugly[i5] * 5
return x
|
Add Problem Ugly Number II
|
Add Problem Ugly Number II
|
Python
|
mit
|
guozengxin/myleetcode,guozengxin/myleetcode
|
Add Problem Ugly Number II
|
# https://leetcode.com/problems/ugly-number-ii/
class Solution(object):
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
ugly = [1] * n
i2 = i3 = i5 = -1
x = v2 = v3 = v5 = 1
for k in xrange(n):
x = min(v2, v3, v5)
ugly[k] = x
if x == v2:
i2 += 1
v2 = ugly[i2] * 2
if x == v3:
i3 += 1
v3 = ugly[i3] * 3
if x == v5:
i5 += 1
v5 = ugly[i5] * 5
return x
|
<commit_before><commit_msg>Add Problem Ugly Number II<commit_after>
|
# https://leetcode.com/problems/ugly-number-ii/
class Solution(object):
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
ugly = [1] * n
i2 = i3 = i5 = -1
x = v2 = v3 = v5 = 1
for k in xrange(n):
x = min(v2, v3, v5)
ugly[k] = x
if x == v2:
i2 += 1
v2 = ugly[i2] * 2
if x == v3:
i3 += 1
v3 = ugly[i3] * 3
if x == v5:
i5 += 1
v5 = ugly[i5] * 5
return x
|
Add Problem Ugly Number II
# https://leetcode.com/problems/ugly-number-ii/
class Solution(object):
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
ugly = [1] * n
i2 = i3 = i5 = -1
x = v2 = v3 = v5 = 1
for k in xrange(n):
x = min(v2, v3, v5)
ugly[k] = x
if x == v2:
i2 += 1
v2 = ugly[i2] * 2
if x == v3:
i3 += 1
v3 = ugly[i3] * 3
if x == v5:
i5 += 1
v5 = ugly[i5] * 5
return x
|
<commit_before><commit_msg>Add Problem Ugly Number II<commit_after>
# https://leetcode.com/problems/ugly-number-ii/
class Solution(object):
def nthUglyNumber(self, n):
"""
:type n: int
:rtype: int
"""
ugly = [1] * n
i2 = i3 = i5 = -1
x = v2 = v3 = v5 = 1
for k in xrange(n):
x = min(v2, v3, v5)
ugly[k] = x
if x == v2:
i2 += 1
v2 = ugly[i2] * 2
if x == v3:
i3 += 1
v3 = ugly[i3] * 3
if x == v5:
i5 += 1
v5 = ugly[i5] * 5
return x
|
|
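A short sanity check for the three-pointer merge above: each of i2/i3/i5 tracks the last ugly number already multiplied by its factor, so values come out in order with no duplicates. The check below runs the record's Solution (under Python 2, which the xrange call requires) against the known start of the sequence:

expected = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]
s = Solution()
for n, want in enumerate(expected, 1):
    assert s.nthUglyNumber(n) == want, (n, want)
print("first %d ugly numbers verified" % len(expected))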
46a060666997df718b4072a6ed3ef93dbedb6107
|
djangae/settings_base.py
|
djangae/settings_base.py
|
DEFAULT_FILE_STORAGE = 'djangae.storage.BlobstoreStorage'
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024
FILE_UPLOAD_HANDLERS = (
'djangae.storage.BlobstoreFileUploadHandler',
'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
DATABASES = {
'default': {
'ENGINE': 'djangae.db.backends.appengine'
}
}
GENERATE_SPECIAL_INDEXES_DURING_TESTING = False
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'djangae.test_runner.DjangaeTestSuiteRunner'
EMAIL_BACKEND = 'djangae.mail.AsyncEmailBackend'
|
DEFAULT_FILE_STORAGE = 'djangae.storage.BlobstoreStorage'
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024
FILE_UPLOAD_HANDLERS = (
'djangae.storage.BlobstoreFileUploadHandler',
'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
DATABASES = {
'default': {
'ENGINE': 'djangae.db.backends.appengine'
}
}
GENERATE_SPECIAL_INDEXES_DURING_TESTING = False
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'djangae.test_runner.DjangaeTestSuiteRunner'
EMAIL_BACKEND = 'djangae.mail.AsyncEmailBackend'
#Setting to *.appspot.com is OK, because GAE takes care of domain routing
#it needs to be like this because of the syntax of addressing non-default versions
# (e.g. -dot-)
ALLOWED_HOSTS = (".appspot.com", )
|
Make sure ALLOWED_HOSTS is correctly set
|
Make sure ALLOWED_HOSTS is correctly set
|
Python
|
bsd-3-clause
|
pablorecio/djangae,martinogden/djangae,jscissr/djangae,chargrizzle/djangae,potatolondon/djangae,martinogden/djangae,armirusco/djangae,trik/djangae,asendecka/djangae,leekchan/djangae,stucox/djangae,b-cannon/my_djae,trik/djangae,SiPiggles/djangae,potatolondon/djangae,nealedj/djangae,stucox/djangae,kirberich/djangae,jscissr/djangae,kirberich/djangae,kirberich/djangae,wangjun/djangae,pablorecio/djangae,chargrizzle/djangae,asendecka/djangae,pablorecio/djangae,grzes/djangae,nealedj/djangae,stucox/djangae,wangjun/djangae,armirusco/djangae,grzes/djangae,wangjun/djangae,asendecka/djangae,leekchan/djangae,leekchan/djangae,SiPiggles/djangae,trik/djangae,jscissr/djangae,martinogden/djangae,chargrizzle/djangae,nealedj/djangae,SiPiggles/djangae,armirusco/djangae,grzes/djangae
|
DEFAULT_FILE_STORAGE = 'djangae.storage.BlobstoreStorage'
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024
FILE_UPLOAD_HANDLERS = (
'djangae.storage.BlobstoreFileUploadHandler',
'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
DATABASES = {
'default': {
'ENGINE': 'djangae.db.backends.appengine'
}
}
GENERATE_SPECIAL_INDEXES_DURING_TESTING = False
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'djangae.test_runner.DjangaeTestSuiteRunner'
EMAIL_BACKEND = 'djangae.mail.AsyncEmailBackend'
Make sure ALLOWED_HOSTS is correctly set
|
DEFAULT_FILE_STORAGE = 'djangae.storage.BlobstoreStorage'
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024
FILE_UPLOAD_HANDLERS = (
'djangae.storage.BlobstoreFileUploadHandler',
'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
DATABASES = {
'default': {
'ENGINE': 'djangae.db.backends.appengine'
}
}
GENERATE_SPECIAL_INDEXES_DURING_TESTING = False
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'djangae.test_runner.DjangaeTestSuiteRunner'
EMAIL_BACKEND = 'djangae.mail.AsyncEmailBackend'
#Setting to *.appspot.com is OK, because GAE takes care of domain routing
#it needs to be like this because of the syntax of addressing non-default versions
# (e.g. -dot-)
ALLOWED_HOSTS = (".appspot.com", )
|
<commit_before>
DEFAULT_FILE_STORAGE = 'djangae.storage.BlobstoreStorage'
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024
FILE_UPLOAD_HANDLERS = (
'djangae.storage.BlobstoreFileUploadHandler',
'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
DATABASES = {
'default': {
'ENGINE': 'djangae.db.backends.appengine'
}
}
GENERATE_SPECIAL_INDEXES_DURING_TESTING = False
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'djangae.test_runner.DjangaeTestSuiteRunner'
EMAIL_BACKEND = 'djangae.mail.AsyncEmailBackend'
<commit_msg>Make sure ALLOWED_HOSTS is correctly set<commit_after>
|
DEFAULT_FILE_STORAGE = 'djangae.storage.BlobstoreStorage'
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024
FILE_UPLOAD_HANDLERS = (
'djangae.storage.BlobstoreFileUploadHandler',
'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
DATABASES = {
'default': {
'ENGINE': 'djangae.db.backends.appengine'
}
}
GENERATE_SPECIAL_INDEXES_DURING_TESTING = False
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'djangae.test_runner.DjangaeTestSuiteRunner'
EMAIL_BACKEND = 'djangae.mail.AsyncEmailBackend'
#Setting to *.appspot.com is OK, because GAE takes care of domain routing
#it needs to be like this because of the syntax of addressing non-default versions
# (e.g. -dot-)
ALLOWED_HOSTS = (".appspot.com", )
|
DEFAULT_FILE_STORAGE = 'djangae.storage.BlobstoreStorage'
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024
FILE_UPLOAD_HANDLERS = (
'djangae.storage.BlobstoreFileUploadHandler',
'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
DATABASES = {
'default': {
'ENGINE': 'djangae.db.backends.appengine'
}
}
GENERATE_SPECIAL_INDEXES_DURING_TESTING = False
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'djangae.test_runner.DjangaeTestSuiteRunner'
EMAIL_BACKEND = 'djangae.mail.AsyncEmailBackend'
Make sure ALLOWED_HOSTS is correctly set
DEFAULT_FILE_STORAGE = 'djangae.storage.BlobstoreStorage'
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024
FILE_UPLOAD_HANDLERS = (
'djangae.storage.BlobstoreFileUploadHandler',
'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
DATABASES = {
'default': {
'ENGINE': 'djangae.db.backends.appengine'
}
}
GENERATE_SPECIAL_INDEXES_DURING_TESTING = False
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'djangae.test_runner.DjangaeTestSuiteRunner'
EMAIL_BACKEND = 'djangae.mail.AsyncEmailBackend'
#Setting to *.appspot.com is OK, because GAE takes care of domain routing
#it needs to be like this because of the syntax of addressing non-default versions
# (e.g. -dot-)
ALLOWED_HOSTS = (".appspot.com", )
|
<commit_before>
DEFAULT_FILE_STORAGE = 'djangae.storage.BlobstoreStorage'
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024
FILE_UPLOAD_HANDLERS = (
'djangae.storage.BlobstoreFileUploadHandler',
'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
DATABASES = {
'default': {
'ENGINE': 'djangae.db.backends.appengine'
}
}
GENERATE_SPECIAL_INDEXES_DURING_TESTING = False
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'djangae.test_runner.DjangaeTestSuiteRunner'
EMAIL_BACKEND = 'djangae.mail.AsyncEmailBackend'
<commit_msg>Make sure ALLOWED_HOSTS is correctly set<commit_after>
DEFAULT_FILE_STORAGE = 'djangae.storage.BlobstoreStorage'
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024
FILE_UPLOAD_HANDLERS = (
'djangae.storage.BlobstoreFileUploadHandler',
'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
DATABASES = {
'default': {
'ENGINE': 'djangae.db.backends.appengine'
}
}
GENERATE_SPECIAL_INDEXES_DURING_TESTING = False
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEST_RUNNER = 'djangae.test_runner.DjangaeTestSuiteRunner'
EMAIL_BACKEND = 'djangae.mail.AsyncEmailBackend'
#Setting to *.appspot.com is OK, because GAE takes care of domain routing
#it needs to be like this because of the syntax of addressing non-default versions
# (e.g. -dot-)
ALLOWED_HOSTS = (".appspot.com", )
|
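The ALLOWED_HOSTS comment above relies on Django's subdomain-wildcard rule: a pattern starting with a dot matches any subdomain of that domain, which is what makes the "-dot-" version URLs work. A quick hedged check using Django's own matcher (validate_host lives in django.http.request on the Django versions this era of djangae targeted):

from django.http.request import validate_host

assert validate_host("myapp.appspot.com", [".appspot.com"])
# non-default GAE versions are addressed as <version>-dot-<app>.appspot.com
assert validate_host("2-dot-myapp.appspot.com", [".appspot.com"])
assert not validate_host("evil.example.com", [".appspot.com"])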
6d65c54483ff572c849aa984c4ff8071c610cfeb
|
bin/switch-country.py
|
bin/switch-country.py
|
#!/usr/bin/env python
# coding=UTF-8
# If you have a standard Mzalendo setup (see below) and need to switch
# between developing for different countries, this script can be
# useful for making that switch simply. It assumes that you have the
# following directory hierarchy, config files and symlinks set up:
#
# .
# ├── collected_static
# ├── media_root -> media_root.kenya
# ├── media_root.kenya
# ├── media_root.nigeria
# ├── media_root.south-africa
# ├── mzalendo
# │ ├── .git
# │ ├── bin
# │ ├── conf
# │ │ ├── general-kenya.yml
# │ │ ├── general-nigeria.yml
# │ │ ├── general-south-africa.yml
# │ │ └── general.yml -> general-kenya.yml
# │ ├── mzalendo
# │ │ ├── core
# ...
import os
import sys
script_directory = os.path.dirname(os.path.realpath(sys.argv[0]))
mzalendo_directory = os.path.join(script_directory, '..', '..')
mzalendo_directory = os.path.normpath(mzalendo_directory)
available_mzalendi = ('nigeria', 'kenya', 'south-africa')
def usage_and_exit():
print >> sys.stderr, "Usage: %s <COUNTRY>" % (sys.argv[0],)
print >> sys.stderr, "... where country is one of:"
for country in available_mzalendi:
print >> sys.stderr, " ", country
sys.exit(1)
if len(sys.argv) != 2:
usage_and_exit()
requested = sys.argv[1]
if requested not in available_mzalendi:
usage_and_exit()
media_root_symlink = os.path.join(mzalendo_directory, 'media_root')
general_yml_symlink = os.path.join(mzalendo_directory, 'mzalendo', 'conf', 'general.yml')
media_root_target = 'media_root.' + requested
general_yml_target = 'general-' + requested + '.yml'
def switch_link(symlink_filename, target_filename):
if not os.path.islink(symlink_filename):
print >> sys.stderr, "%s was not a symlink, and should be" % (symlink_filename,)
sys.exit(1)
full_target_filename = os.path.join(os.path.dirname(symlink_filename),
target_filename)
if not os.path.exists(full_target_filename):
print >> sys.stderr, "The intended target of the symlink (%s) didn't exist" % (target_filename,)
sys.exit(1)
os.unlink(symlink_filename)
    os.symlink(target_filename, symlink_filename)
for target, symlink in ((media_root_target, media_root_symlink),
(general_yml_target, general_yml_symlink)):
switch_link(symlink, target)
|
Add a helper script for switching between countries
|
Add a helper script for switching between countries
This may be useful for people using one copy for
development who have to switch between working on
different countries.
|
Python
|
agpl-3.0
|
hzj123/56th,hzj123/56th,mysociety/pombola,patricmutwiri/pombola,ken-muturi/pombola,mysociety/pombola,ken-muturi/pombola,patricmutwiri/pombola,hzj123/56th,geoffkilpin/pombola,patricmutwiri/pombola,ken-muturi/pombola,patricmutwiri/pombola,mysociety/pombola,mysociety/pombola,geoffkilpin/pombola,ken-muturi/pombola,geoffkilpin/pombola,mysociety/pombola,patricmutwiri/pombola,geoffkilpin/pombola,geoffkilpin/pombola,ken-muturi/pombola,hzj123/56th,ken-muturi/pombola,hzj123/56th,hzj123/56th,mysociety/pombola,patricmutwiri/pombola,geoffkilpin/pombola
|
Add a helper script for switching between countries
This may be useful for people using one copy for
development who have to switch between working on
different countries.
|
#!/usr/bin/env python
# coding=UTF-8
# If you have a standard Mzalendo setup (see below) and need to switch
# between developing for different countries, this script can be
# useful for making that switch simply. It assumes that you have the
# following directory hierarchy, config files and symlinks set up:
#
# .
# ├── collected_static
# ├── media_root -> media_root.kenya
# ├── media_root.kenya
# ├── media_root.nigeria
# ├── media_root.south-africa
# ├── mzalendo
# │ ├── .git
# │ ├── bin
# │ ├── conf
# │ │ ├── general-kenya.yml
# │ │ ├── general-nigeria.yml
# │ │ ├── general-south-africa.yml
# │ │ └── general.yml -> general-kenya.yml
# │ ├── mzalendo
# │ │ ├── core
# ...
import os
import sys
script_directory = os.path.dirname(os.path.realpath(sys.argv[0]))
mzalendo_directory = os.path.join(script_directory, '..', '..')
mzalendo_directory = os.path.normpath(mzalendo_directory)
available_mzalendi = ('nigeria', 'kenya', 'south-africa')
def usage_and_exit():
print >> sys.stderr, "Usage: %s <COUNTRY>" % (sys.argv[0],)
print >> sys.stderr, "... where country is one of:"
for country in available_mzalendi:
print >> sys.stderr, " ", country
sys.exit(1)
if len(sys.argv) != 2:
usage_and_exit()
requested = sys.argv[1]
if requested not in available_mzalendi:
usage_and_exit()
media_root_symlink = os.path.join(mzalendo_directory, 'media_root')
general_yml_symlink = os.path.join(mzalendo_directory, 'mzalendo', 'conf', 'general.yml')
media_root_target = 'media_root.' + requested
general_yml_target = 'general-' + requested + '.yml'
def switch_link(symlink_filename, target_filename):
if not os.path.islink(symlink_filename):
print >> sys.stderr, "%s was not a symlink, and should be" % (symlink_filename,)
sys.exit(1)
full_target_filename = os.path.join(os.path.dirname(symlink_filename),
target_filename)
if not os.path.exists(full_target_filename):
print >> sys.stderr, "The intended target of the symlink (%s) didn't exist" % (target_filename,)
sys.exit(1)
os.unlink(symlink_filename)
    os.symlink(target_filename, symlink_filename)
for target, symlink in ((media_root_target, media_root_symlink),
(general_yml_target, general_yml_symlink)):
switch_link(symlink, target)
|
<commit_before><commit_msg>Add a helper script for switching between countries
This may be useful for people using one copy for
development who have to switch between working on
different countries.<commit_after>
|
#!/usr/bin/env python
# coding=UTF-8
# If you have a standard Mzalendo setup (see below) and need to switch
# between developing for different countries, this script can be
# useful for making that switch simply. It assumes that you have the
# following directory hierarchy, config files and symlinks set up:
#
# .
# ├── collected_static
# ├── media_root -> media_root.kenya
# ├── media_root.kenya
# ├── media_root.nigeria
# ├── media_root.south-africa
# ├── mzalendo
# │ ├── .git
# │ ├── bin
# │ ├── conf
# │ │ ├── general-kenya.yml
# │ │ ├── general-nigeria.yml
# │ │ ├── general-south-africa.yml
# │ │ └── general.yml -> general-kenya.yml
# │ ├── mzalendo
# │ │ ├── core
# ...
import os
import sys
script_directory = os.path.dirname(os.path.realpath(sys.argv[0]))
mzalendo_directory = os.path.join(script_directory, '..', '..')
mzalendo_directory = os.path.normpath(mzalendo_directory)
available_mzalendi = ('nigeria', 'kenya', 'south-africa')
def usage_and_exit():
print >> sys.stderr, "Usage: %s <COUNTRY>" % (sys.argv[0],)
print >> sys.stderr, "... where country is one of:"
for country in available_mzalendi:
print >> sys.stderr, " ", country
sys.exit(1)
if len(sys.argv) != 2:
usage_and_exit()
requested = sys.argv[1]
if requested not in available_mzalendi:
usage_and_exit()
media_root_symlink = os.path.join(mzalendo_directory, 'media_root')
general_yml_symlink = os.path.join(mzalendo_directory, 'mzalendo', 'conf', 'general.yml')
media_root_target = 'media_root.' + requested
general_yml_target = 'general-' + requested + '.yml'
def switch_link(symlink_filename, target_filename):
if not os.path.islink(symlink_filename):
print >> sys.stderr, "%s was not a symlink, and should be" % (symlink_filename,)
sys.exit(1)
full_target_filename = os.path.join(os.path.dirname(symlink_filename),
target_filename)
if not os.path.exists(full_target_filename):
print >> sys.stderr, "The intended target of the symlink (%s) didn't exist" % (target_filename,)
sys.exit(1)
os.unlink(symlink_filename)
    os.symlink(target_filename, symlink_filename)
for target, symlink in ((media_root_target, media_root_symlink),
(general_yml_target, general_yml_symlink)):
switch_link(symlink, target)
|
Add a helper script for switching between countries
This may be useful for people using one copy for
development who have to switch between working on
different countries.#!/usr/bin/env python
# coding=UTF-8
# If you have a standard Mzalendo setup (see below) and need to switch
# between developing for different countries, this script can be
# useful for making that switch simply. It assumes that you have the
# following directory hierarchy, config files and symlinks set up:
#
# .
# ├── collected_static
# ├── media_root -> media_root.kenya
# ├── media_root.kenya
# ├── media_root.nigeria
# ├── media_root.south-africa
# ├── mzalendo
# │ ├── .git
# │ ├── bin
# │ ├── conf
# │ │ ├── general-kenya.yml
# │ │ ├── general-nigeria.yml
# │ │ ├── general-south-africa.yml
# │ │ └── general.yml -> general-kenya.yml
# │ ├── mzalendo
# │ │ ├── core
# ...
import os
import sys
script_directory = os.path.dirname(os.path.realpath(sys.argv[0]))
mzalendo_directory = os.path.join(script_directory, '..', '..')
mzalendo_directory = os.path.normpath(mzalendo_directory)
available_mzalendi = ('nigeria', 'kenya', 'south-africa')
def usage_and_exit():
print >> sys.stderr, "Usage: %s <COUNTRY>" % (sys.argv[0],)
print >> sys.stderr, "... where country is one of:"
for country in available_mzalendi:
print >> sys.stderr, " ", country
sys.exit(1)
if len(sys.argv) != 2:
usage_and_exit()
requested = sys.argv[1]
if requested not in available_mzalendi:
usage_and_exit()
media_root_symlink = os.path.join(mzalendo_directory, 'media_root')
general_yml_symlink = os.path.join(mzalendo_directory, 'mzalendo', 'conf', 'general.yml')
media_root_target = 'media_root.' + requested
general_yml_target = 'general-' + requested + '.yml'
def switch_link(symlink_filename, target_filename):
if not os.path.islink(symlink_filename):
print >> sys.stderr, "%s was not a symlink, and should be" % (symlink_filename,)
sys.exit(1)
full_target_filename = os.path.join(os.path.dirname(symlink_filename),
target_filename)
if not os.path.exists(full_target_filename):
print >> sys.stderr, "The intended target of the symlink (%s) didn't exist" % (target_filename,)
sys.exit(1)
os.unlink(symlink_filename)
    os.symlink(target_filename, symlink_filename)
for target, symlink in ((media_root_target, media_root_symlink),
(general_yml_target, general_yml_symlink)):
switch_link(symlink, target)
|
<commit_before><commit_msg>Add a helper script for switching between countries
This may be useful for people using one copy for
development who have to switch between working on
different countries.<commit_after>#!/usr/bin/env python
# coding=UTF-8
# If you have a standard Mzalendo setup (see below) and need to switch
# between developing for different countries, this script can be
# useful for making that switch simply. It assumes that you have the
# following directory hierarchy, config files and symlinks set up:
#
# .
# ├── collected_static
# ├── media_root -> media_root.kenya
# ├── media_root.kenya
# ├── media_root.nigeria
# ├── media_root.south-africa
# ├── mzalendo
# │ ├── .git
# │ ├── bin
# │ ├── conf
# │ │ ├── general-kenya.yml
# │ │ ├── general-nigeria.yml
# │ │ ├── general-south-africa.yml
# │ │ └── general.yml -> general-kenya.yml
# │ ├── mzalendo
# │ │ ├── core
# ...
import os
import sys
script_directory = os.path.dirname(os.path.realpath(sys.argv[0]))
mzalendo_directory = os.path.join(script_directory, '..', '..')
mzalendo_directory = os.path.normpath(mzalendo_directory)
available_mzalendi = ('nigeria', 'kenya', 'south-africa')
def usage_and_exit():
print >> sys.stderr, "Usage: %s <COUNTRY>" % (sys.argv[0],)
print >> sys.stderr, "... where country is one of:"
for country in available_mzalendi:
print >> sys.stderr, " ", country
sys.exit(1)
if len(sys.argv) != 2:
usage_and_exit()
requested = sys.argv[1]
if requested not in available_mzalendi:
usage_and_exit()
media_root_symlink = os.path.join(mzalendo_directory, 'media_root')
general_yml_symlink = os.path.join(mzalendo_directory, 'mzalendo', 'conf', 'general.yml')
media_root_target = 'media_root.' + requested
general_yml_target = 'general-' + requested + '.yml'
def switch_link(symlink_filename, target_filename):
if not os.path.islink(symlink_filename):
print >> sys.stderr, "%s was not a symlink, and should be" % (symlink_filename,)
sys.exit(1)
full_target_filename = os.path.join(os.path.dirname(symlink_filename),
target_filename)
if not os.path.exists(full_target_filename):
print >> sys.stderr, "The intended target of the symlink (%s) didn't exist" % (target_filename,)
sys.exit(1)
os.unlink(symlink_filename)
    os.symlink(target_filename, symlink_filename)
for target, symlink in ((media_root_target, media_root_symlink),
(general_yml_target, general_yml_symlink)):
switch_link(symlink, target)
|
|
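The core move in switch-country.py is retargeting a symlink by unlinking and recreating it. A standalone sketch of that pattern, separate from the repository (paths and names are illustrative):

import os
import tempfile

def retarget_symlink(link_path, target_name):
    # Drop the old link, then point it at the new target; target_name is
    # interpreted relative to the link's own directory, as in the script.
    if os.path.islink(link_path):
        os.unlink(link_path)
    os.symlink(target_name, link_path)

workdir = tempfile.mkdtemp()
for country in ("kenya", "nigeria"):
    os.mkdir(os.path.join(workdir, "media_root." + country))
link = os.path.join(workdir, "media_root")
retarget_symlink(link, "media_root.kenya")
retarget_symlink(link, "media_root.nigeria")
print(os.readlink(link))  # media_root.nigeria

Unlike the unlink-then-symlink pair, a truly atomic switch would create the new link under a temporary name and os.rename() it over the old one; for a developer convenience script the simpler form is fine.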
6eed00668f5510a6d8590ae89befba8814025f31
|
sunburnt/json.py
|
sunburnt/json.py
|
from __future__ import absolute_import
import json, math
from .schema import SolrResponse, SolrResult
class SunburntJSONEncoder(json.JSONEncoder):
def encode(self, o):
if isinstance(o, SolrResponse):
return self.encode(list(o))
return super(SunburntJSONEncoder, self).encode(o)
def default(self, obj):
if hasattr(obj, "strftime"):
try:
microsecond = obj.microsecond
except AttributeError:
microsecond = int(1000000*math.modf(obj.second)[0])
return u"%s.%sZ" % (obj.strftime("%Y-%m-%dT%H:%M:%S"), microsecond)
return super(SunburntJSONEncoder, self).default(obj)
def dump(obj, fp, *args, **kwargs):
if isinstance(obj, SolrResponse):
obj = list(obj)
elif isinstance(obj, SolrResult):
obj = obj.docs
return json.dump(obj, fp, cls=SunburntJSONEncoder, *args, **kwargs)
def dumps(obj, *args, **kwargs):
if isinstance(obj, SolrResponse):
obj = list(obj)
elif isinstance(obj, SolrResult):
obj = obj.docs
return json.dumps(obj, cls=SunburntJSONEncoder, *args, **kwargs)
load = json.load
loads = json.loads
|
Add experimental JSON output module
|
Add experimental JSON output module
|
Python
|
mit
|
rlskoeser/sunburnt,qmssof/sunburnt,anmar/sunburnt,pixbuffer/sunburnt-spatial,pixbuffer/sunburnt-spatial,tow/sunburnt,rlskoeser/sunburnt,anmar/sunburnt
|
Add experimental JSON output module
|
from __future__ import absolute_import
import json, math
from .schema import SolrResponse, SolrResult
class SunburntJSONEncoder(json.JSONEncoder):
def encode(self, o):
if isinstance(o, SolrResponse):
return self.encode(list(o))
return super(SunburntJSONEncoder, self).encode(o)
def default(self, obj):
if hasattr(obj, "strftime"):
try:
microsecond = obj.microsecond
except AttributeError:
microsecond = int(1000000*math.modf(obj.second)[0])
return u"%s.%sZ" % (obj.strftime("%Y-%m-%dT%H:%M:%S"), microsecond)
return super(SunburntJSONEncoder, self).default(obj)
def dump(obj, fp, *args, **kwargs):
if isinstance(obj, SolrResponse):
obj = list(obj)
elif isinstance(obj, SolrResult):
obj = obj.docs
return json.dump(obj, fp, cls=SunburntJSONEncoder, *args, **kwargs)
def dumps(obj, *args, **kwargs):
if isinstance(obj, SolrResponse):
obj = list(obj)
elif isinstance(obj, SolrResult):
obj = obj.docs
return json.dumps(obj, cls=SunburntJSONEncoder, *args, **kwargs)
load = json.load
loads = json.loads
|
<commit_before><commit_msg>Add experimental JSON output module<commit_after>
|
from __future__ import absolute_import
import json, math
from .schema import SolrResponse, SolrResult
class SunburntJSONEncoder(json.JSONEncoder):
def encode(self, o):
if isinstance(o, SolrResponse):
return self.encode(list(o))
return super(SunburntJSONEncoder, self).encode(o)
def default(self, obj):
if hasattr(obj, "strftime"):
try:
microsecond = obj.microsecond
except AttributeError:
microsecond = int(1000000*math.modf(obj.second)[0])
return u"%s.%sZ" % (obj.strftime("%Y-%m-%dT%H:%M:%S"), microsecond)
return super(SunburntJSONEncoder, self).default(obj)
def dump(obj, fp, *args, **kwargs):
if isinstance(obj, SolrResponse):
obj = list(obj)
elif isinstance(obj, SolrResult):
obj = obj.docs
return json.dump(obj, fp, cls=SunburntJSONEncoder, *args, **kwargs)
def dumps(obj, *args, **kwargs):
if isinstance(obj, SolrResponse):
obj = list(obj)
elif isinstance(obj, SolrResult):
obj = obj.docs
return json.dumps(obj, cls=SunburntJSONEncoder, *args, **kwargs)
load = json.load
loads = json.loads
|
Add experimental JSON output modulefrom __future__ import absolute_import
import json, math
from .schema import SolrResponse, SolrResult
class SunburntJSONEncoder(json.JSONEncoder):
def encode(self, o):
if isinstance(o, SolrResponse):
return self.encode(list(o))
return super(SunburntJSONEncoder, self).encode(o)
def default(self, obj):
if hasattr(obj, "strftime"):
try:
microsecond = obj.microsecond
except AttributeError:
microsecond = int(1000000*math.modf(obj.second)[0])
return u"%s.%sZ" % (obj.strftime("%Y-%m-%dT%H:%M:%S"), microsecond)
return super(SunburntJSONEncoder, self).default(obj)
def dump(obj, fp, *args, **kwargs):
if isinstance(obj, SolrResponse):
obj = list(obj)
elif isinstance(obj, SolrResult):
obj = obj.docs
return json.dump(obj, fp, cls=SunburntJSONEncoder, *args, **kwargs)
def dumps(obj, *args, **kwargs):
if isinstance(obj, SolrResponse):
obj = list(obj)
elif isinstance(obj, SolrResult):
obj = obj.docs
return json.dumps(obj, cls=SunburntJSONEncoder, *args, **kwargs)
load = json.load
loads = json.loads
|
<commit_before><commit_msg>Add experimental JSON output module<commit_after>from __future__ import absolute_import
import json, math
from .schema import SolrResponse, SolrResult
class SunburntJSONEncoder(json.JSONEncoder):
def encode(self, o):
if isinstance(o, SolrResponse):
return self.encode(list(o))
return super(SunburntJSONEncoder, self).encode(o)
def default(self, obj):
if hasattr(obj, "strftime"):
try:
microsecond = obj.microsecond
except AttributeError:
microsecond = int(1000000*math.modf(obj.second)[0])
return u"%s.%sZ" % (obj.strftime("%Y-%m-%dT%H:%M:%S"), microsecond)
return super(SunburntJSONEncoder, self).default(obj)
def dump(obj, fp, *args, **kwargs):
if isinstance(obj, SolrResponse):
obj = list(obj)
elif isinstance(obj, SolrResult):
obj = obj.docs
return json.dump(obj, fp, cls=SunburntJSONEncoder, *args, **kwargs)
def dumps(obj, *args, **kwargs):
if isinstance(obj, SolrResponse):
obj = list(obj)
elif isinstance(obj, SolrResult):
obj = obj.docs
return json.dumps(obj, cls=SunburntJSONEncoder, *args, **kwargs)
load = json.load
loads = json.loads
|
|
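The useful piece of the module above is the datetime handling in default(). A self-contained sketch of the same idea that runs without sunburnt or Solr (the class name is made up):

import datetime
import json
import math

class SolrDateEncoder(json.JSONEncoder):
    def default(self, obj):
        if hasattr(obj, "strftime"):
            # datetime objects expose .microsecond; time-like objects with
            # fractional seconds get the fraction recovered from .second.
            try:
                microsecond = obj.microsecond
            except AttributeError:
                microsecond = int(1000000 * math.modf(obj.second)[0])
            return u"%s.%sZ" % (obj.strftime("%Y-%m-%dT%H:%M:%S"), microsecond)
        return super(SolrDateEncoder, self).default(obj)

when = datetime.datetime(2011, 5, 1, 12, 30, 0, 500000)
print(json.dumps({"when": when}, cls=SolrDateEncoder))
# {"when": "2011-05-01T12:30:00.500000Z"}

One quirk worth noting: %s does not zero-pad the microsecond value, so fractions under 100000 microseconds render ambiguously (e.g. 250 becomes ".250Z"); a stricter encoder would format it with %06d.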
cfac2ea76b00620545717667305e78179a8fb390
|
fetch_configs/flutter.py
|
fetch_configs/flutter.py
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import config_util # pylint: disable=import-error
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=no-init
class Flutter(config_util.Config):
"""Basic Config class for the Flutter repository."""
@staticmethod
def fetch_spec(_props):
solution = {
'custom_deps': {},
'deps_file': 'DEPS',
'managed' : False,
'name' : 'src/flutter',
'safesync_url': '',
'url' : 'https://github.com/flutter/engine.git',
}
spec = {
'solutions': [solution]
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'src'
def main(argv=None):
return Flutter().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add fetch recipe for the Flutter Engine repository.
|
Add fetch recipe for the Flutter Engine repository.
Tested with the following
$ mkdir flutter_engine
$ cd flutter_engine
$ fetch flutter
$ cd src
# Confirm the source tree and dependencies are as expected.
BUG=None
TEST=see above
Change-Id: Ie1a6653c3b76354e282688e0db50acc112794426
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/tools/depot_tools/+/3026128
Reviewed-by: Dirk Pranke <6693ee08b084bb2ffa767f664c99b689395e0e4b@google.com>
Commit-Queue: Chinmay Garde <2c5f961126c80cfe4f9b4f17931784885f34f766@google.com>
|
Python
|
bsd-3-clause
|
CoherentLabs/depot_tools,CoherentLabs/depot_tools
|
Add fetch recipe for the Flutter Engine repository.
Tested with the following
$ mkdir flutter_engine
$ cd flutter_engine
$ fetch flutter
$ cd src
# Confirm the source tree and dependencies are as expected.
BUG=None
TEST=see above
Change-Id: Ie1a6653c3b76354e282688e0db50acc112794426
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/tools/depot_tools/+/3026128
Reviewed-by: Dirk Pranke <6693ee08b084bb2ffa767f664c99b689395e0e4b@google.com>
Commit-Queue: Chinmay Garde <2c5f961126c80cfe4f9b4f17931784885f34f766@google.com>
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import config_util # pylint: disable=import-error
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=no-init
class Flutter(config_util.Config):
"""Basic Config class for the Flutter repository."""
@staticmethod
def fetch_spec(_props):
solution = {
'custom_deps': {},
'deps_file': 'DEPS',
'managed' : False,
'name' : 'src/flutter',
'safesync_url': '',
'url' : 'https://github.com/flutter/engine.git',
}
spec = {
'solutions': [solution]
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'src'
def main(argv=None):
return Flutter().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add fetch recipe for the Flutter Engine repository.
Tested with the following
$ mkdir flutter_engine
$ cd flutter_engine
$ fetch flutter
$ cd src
# Confirm the source tree and dependencies are as expected.
BUG=None
TEST=see above
Change-Id: Ie1a6653c3b76354e282688e0db50acc112794426
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/tools/depot_tools/+/3026128
Reviewed-by: Dirk Pranke <6693ee08b084bb2ffa767f664c99b689395e0e4b@google.com>
Commit-Queue: Chinmay Garde <2c5f961126c80cfe4f9b4f17931784885f34f766@google.com><commit_after>
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import config_util # pylint: disable=import-error
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=no-init
class Flutter(config_util.Config):
"""Basic Config class for the Flutter repository."""
@staticmethod
def fetch_spec(_props):
solution = {
'custom_deps': {},
'deps_file': 'DEPS',
'managed' : False,
'name' : 'src/flutter',
'safesync_url': '',
'url' : 'https://github.com/flutter/engine.git',
}
spec = {
'solutions': [solution]
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'src'
def main(argv=None):
return Flutter().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Add fetch recipe for the Flutter Engine repository.
Tested with the following
$ mkdir flutter_engine
$ cd flutter_engine
$ fetch flutter
$ cd src
# Confirm the source tree and dependencies are as expected.
BUG=None
TEST=see above
Change-Id: Ie1a6653c3b76354e282688e0db50acc112794426
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/tools/depot_tools/+/3026128
Reviewed-by: Dirk Pranke <6693ee08b084bb2ffa767f664c99b689395e0e4b@google.com>
Commit-Queue: Chinmay Garde <2c5f961126c80cfe4f9b4f17931784885f34f766@google.com># Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import config_util # pylint: disable=import-error
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=no-init
class Flutter(config_util.Config):
"""Basic Config class for the Flutter repository."""
@staticmethod
def fetch_spec(_props):
solution = {
'custom_deps': {},
'deps_file': 'DEPS',
'managed' : False,
'name' : 'src/flutter',
'safesync_url': '',
'url' : 'https://github.com/flutter/engine.git',
}
spec = {
'solutions': [solution]
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'src'
def main(argv=None):
return Flutter().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
<commit_before><commit_msg>Add fetch recipe for the Flutter Engine repository.
Tested with the following
$ mkdir flutter_engine
$ cd flutter_engine
$ fetch flutter
$ cd src
# Confirm the source tree and dependencies are as expected.
BUG=None
TEST=see above
Change-Id: Ie1a6653c3b76354e282688e0db50acc112794426
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/tools/depot_tools/+/3026128
Reviewed-by: Dirk Pranke <6693ee08b084bb2ffa767f664c99b689395e0e4b@google.com>
Commit-Queue: Chinmay Garde <2c5f961126c80cfe4f9b4f17931784885f34f766@google.com><commit_after># Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import config_util # pylint: disable=import-error
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=no-init
class Flutter(config_util.Config):
"""Basic Config class for the Flutter repository."""
@staticmethod
def fetch_spec(_props):
solution = {
'custom_deps': {},
'deps_file': 'DEPS',
'managed' : False,
'name' : 'src/flutter',
'safesync_url': '',
'url' : 'https://github.com/flutter/engine.git',
}
spec = {
'solutions': [solution]
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'src'
def main(argv=None):
return Flutter().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
0ca43532ea20a2d202c721a371194827c8b74520
|
__openerp__.py
|
__openerp__.py
|
{
'name': 'Genesis Product Multi-Image',
'description': 'This module adds multiple product images (22 currently) into a tab in product.template in a tab called Product Images',
'category': 'Website',
'version': '1.0',
'author': 'Luke Branch',
'depends': ['website', 'website_sale'],
'data': [
'views/product_images.xml',
],
'application': True,
}
|
Add module descriptions and dependencies
|
Add module descriptions and dependencies
{
'name': 'Genesis Product Multi-Image',
'description': 'This module adds multiple product images (22 currently) into a tab in product.template in a tab called Product Images',
'category': 'Website',
'version': '1.0',
'author': 'Luke Branch',
'depends': ['website', 'website_sale'],
'data': [
'views/product_images.xml',
],
'application': True,
}
|
Python
|
mit
|
luistorresm/website_multi_image,luistorresm/website_multi_image,Vauxoo/website_multi_image,yelizariev/website_multi_image,OdooCommunityWidgets/website_multi_image,Vauxoo/website_multi_image,yelizariev/website_multi_image,vauxoo-dev/website_multi_image,OdooCommunityWidgets/website_multi_image,Vauxoo/website_multi_image,vauxoo-dev/website_multi_image,luistorresm/website_multi_image,vauxoo-dev/website_multi_image,lukebranch/website_multi_image,yelizariev/website_multi_image,lukebranch/website_multi_image
|
Add module descriptions and dependencies
{
'name': 'Genesis Product Multi-Image',
'description': 'This module adds multiple product images (22 currently) into a tab in product.template in a tab called Product Images',
'category': 'Website',
'version': '1.0',
'author': 'Luke Branch',
'depends': ['website', 'website_sale'],
'data': [
'views/product_images.xml',
],
'application': True,
}
|
{
'name': 'Genesis Product Multi-Image',
'description': 'This module adds multiple product images (22 currently) into a tab in product.template in a tab called Product Images',
'category': 'Website',
'version': '1.0',
'author': 'Luke Branch',
'depends': ['website', 'website_sale'],
'data': [
'views/product_images.xml',
],
'application': True,
}
|
<commit_before><commit_msg>Add module descriptions and dependencies
{
'name': 'Genesis Product Multi-Image',
'description': 'This module adds multiple product images (22 currently) into a tab in product.template in a tab called Product Images',
'category': 'Website',
'version': '1.0',
'author': 'Luke Branch',
'depends': ['website', 'website_sale'],
'data': [
'views/product_images.xml',
],
'application': True,
}<commit_after>
|
{
'name': 'Genesis Product Multi-Image',
'description': 'This module adds multiple product images (22 currently) into a tab in product.template in a tab called Product Images',
'category': 'Website',
'version': '1.0',
'author': 'Luke Branch',
'depends': ['website', 'website_sale'],
'data': [
'views/product_images.xml',
],
'application': True,
}
|
Add module descriptions and dependencies
{
'name': 'Genesis Product Multi-Image',
'description': 'This module adds multiple product images (22 currently) into a tab in product.template in a tab called Product Images',
'category': 'Website',
'version': '1.0',
'author': 'Luke Branch',
'depends': ['website', 'website_sale'],
'data': [
'views/product_images.xml',
],
'application': True,
}{
'name': 'Genesis Product Multi-Image',
'description': 'This module adds multiple product images (22 currently) into a tab in product.template in a tab called Product Images',
'category': 'Website',
'version': '1.0',
'author': 'Luke Branch',
'depends': ['website', 'website_sale'],
'data': [
'views/product_images.xml',
],
'application': True,
}
|
<commit_before><commit_msg>Add module descriptions and dependencies
{
'name': 'Genesis Product Multi-Image',
'description': 'This module adds multiple product images (22 currently) into a tab in product.template in a tab called Product Images',
'category': 'Website',
'version': '1.0',
'author': 'Luke Branch',
'depends': ['website', 'website_sale'],
'data': [
'views/product_images.xml',
],
'application': True,
}<commit_after>{
'name': 'Genesis Product Multi-Image',
'description': 'This module adds multiple product images (22 currently) into a tab in product.template in a tab called Product Images',
'category': 'Website',
'version': '1.0',
'author': 'Luke Branch',
'depends': ['website', 'website_sale'],
'data': [
'views/product_images.xml',
],
'application': True,
}
|
|
5c1d725103b1d97c0753d3ae2baf7f0835f995eb
|
comrade/functional.py
|
comrade/functional.py
|
def lazy(func):
def lazy_func(self, *args, **kwargs):
cached_attribute = '_cached_%s' % func.__name__
if not hasattr(self, cached_attribute):
setattr(self, cached_attribute, func(self, *args, **kwargs))
return getattr(self, cached_attribute)
return lazy_func
|
Add a @lazy decorator for simple lazy methods.
|
Add a @lazy decorator for simple lazy methods.
|
Python
|
mit
|
bueda/django-comrade
|
Add a @lazy decorator for simple lazy methods.
|
def lazy(func):
def lazy_func(self, *args, **kwargs):
cached_attribute = '_cached_%s' % func.__name__
if not hasattr(self, cached_attribute):
setattr(self, cached_attribute, func(self, *args, **kwargs))
return getattr(self, cached_attribute)
return lazy_func
|
<commit_before><commit_msg>Add a @lazy decorator for simple lazy methods.<commit_after>
|
def lazy(func):
def lazy_func(self, *args, **kwargs):
cached_attribute = '_cached_%s' % func.__name__
if not hasattr(self, cached_attribute):
setattr(self, cached_attribute, func(self, *args, **kwargs))
return getattr(self, cached_attribute)
return lazy_func
|
Add a @lazy decorator for simple lazy methods.def lazy(func):
def lazy_func(self, *args, **kwargs):
cached_attribute = '_cached_%s' % func.__name__
if not hasattr(self, cached_attribute):
setattr(self, cached_attribute, func(self, *args, **kwargs))
return getattr(self, cached_attribute)
return lazy_func
|
<commit_before><commit_msg>Add a @lazy decorator for simple lazy methods.<commit_after>def lazy(func):
def lazy_func(self, *args, **kwargs):
cached_attribute = '_cached_%s' % func.__name__
if not hasattr(self, cached_attribute):
setattr(self, cached_attribute, func(self, *args, **kwargs))
return getattr(self, cached_attribute)
return lazy_func
|
|
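A quick usage sketch of the @lazy decorator (the Report class and its values are made up; the decorator is repeated so the sketch is self-contained): the wrapped method runs once per instance, and later calls return the cached attribute.

def lazy(func):
    def lazy_func(self, *args, **kwargs):
        cached_attribute = '_cached_%s' % func.__name__
        if not hasattr(self, cached_attribute):
            setattr(self, cached_attribute, func(self, *args, **kwargs))
        return getattr(self, cached_attribute)
    return lazy_func

class Report(object):
    @lazy
    def totals(self):
        print("computing...")  # visible side effect to show the caching
        return sum(range(5))

r = Report()
print(r.totals())  # prints "computing..." then 10
print(r.totals())  # prints only 10; the cached attribute is reused

Note the cache key is just the method name, so arguments are ignored after the first call; that makes the decorator suitable for zero-argument methods only.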
353dc990b12647ba784ad06a23dc3e374754081a
|
openstack/tests/functional/network/v2/test_security_group.py
|
openstack/tests/functional/network/v2/test_security_group.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import security_group
from openstack.tests.functional import base
class TestSecurityGroup(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
ID = None
@classmethod
def setUpClass(cls):
super(TestSecurityGroup, cls).setUpClass()
sot = cls.conn.network.create_security_group(name=cls.NAME)
assert isinstance(sot, security_group.SecurityGroup)
        assert sot.name == cls.NAME
cls.ID = sot.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_security_group(cls.ID,
ignore_missing=False)
        assert sot is None
def test_find(self):
sot = self.conn.network.find_security_group(self.NAME)
self.assertEqual(self.ID, sot.id)
def test_get(self):
sot = self.conn.network.get_security_group(self.ID)
self.assertEqual(self.NAME, sot.name)
self.assertEqual(self.ID, sot.id)
def test_list(self):
names = [o.name for o in self.conn.network.security_groups()]
self.assertIn(self.NAME, names)
|
Add functional tests for security groups
|
Add functional tests for security groups
Change-Id: I90b29c2e48303867ae2f670643171d113e485cf1
|
Python
|
apache-2.0
|
openstack/python-openstacksdk,stackforge/python-openstacksdk,openstack/python-openstacksdk,dtroyer/python-openstacksdk,briancurtin/python-openstacksdk,mtougeron/python-openstacksdk,stackforge/python-openstacksdk,mtougeron/python-openstacksdk,dtroyer/python-openstacksdk,dudymas/python-openstacksdk,briancurtin/python-openstacksdk,dudymas/python-openstacksdk
|
Add functional tests for security groups
Change-Id: I90b29c2e48303867ae2f670643171d113e485cf1
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import security_group
from openstack.tests.functional import base
class TestSecurityGroup(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
ID = None
@classmethod
def setUpClass(cls):
super(TestSecurityGroup, cls).setUpClass()
sot = cls.conn.network.create_security_group(name=cls.NAME)
assert isinstance(sot, security_group.SecurityGroup)
        assert sot.name == cls.NAME
cls.ID = sot.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_security_group(cls.ID,
ignore_missing=False)
        assert sot is None
def test_find(self):
sot = self.conn.network.find_security_group(self.NAME)
self.assertEqual(self.ID, sot.id)
def test_get(self):
sot = self.conn.network.get_security_group(self.ID)
self.assertEqual(self.NAME, sot.name)
self.assertEqual(self.ID, sot.id)
def test_list(self):
names = [o.name for o in self.conn.network.security_groups()]
self.assertIn(self.NAME, names)
|
<commit_before><commit_msg>Add functional tests for security groups
Change-Id: I90b29c2e48303867ae2f670643171d113e485cf1<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import security_group
from openstack.tests.functional import base
class TestSecurityGroup(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
ID = None
@classmethod
def setUpClass(cls):
super(TestSecurityGroup, cls).setUpClass()
sot = cls.conn.network.create_security_group(name=cls.NAME)
assert isinstance(sot, security_group.SecurityGroup)
        assert sot.name == cls.NAME
cls.ID = sot.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_security_group(cls.ID,
ignore_missing=False)
        assert sot is None
def test_find(self):
sot = self.conn.network.find_security_group(self.NAME)
self.assertEqual(self.ID, sot.id)
def test_get(self):
sot = self.conn.network.get_security_group(self.ID)
self.assertEqual(self.NAME, sot.name)
self.assertEqual(self.ID, sot.id)
def test_list(self):
names = [o.name for o in self.conn.network.security_groups()]
self.assertIn(self.NAME, names)
|
Add functional tests for security groups
Change-Id: I90b29c2e48303867ae2f670643171d113e485cf1# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import security_group
from openstack.tests.functional import base
class TestSecurityGroup(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
ID = None
@classmethod
def setUpClass(cls):
super(TestSecurityGroup, cls).setUpClass()
sot = cls.conn.network.create_security_group(name=cls.NAME)
assert isinstance(sot, security_group.SecurityGroup)
        assert sot.name == cls.NAME
cls.ID = sot.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_security_group(cls.ID,
ignore_missing=False)
        assert sot is None
def test_find(self):
sot = self.conn.network.find_security_group(self.NAME)
self.assertEqual(self.ID, sot.id)
def test_get(self):
sot = self.conn.network.get_security_group(self.ID)
self.assertEqual(self.NAME, sot.name)
self.assertEqual(self.ID, sot.id)
def test_list(self):
names = [o.name for o in self.conn.network.security_groups()]
self.assertIn(self.NAME, names)
|
<commit_before><commit_msg>Add functional tests for security groups
Change-Id: I90b29c2e48303867ae2f670643171d113e485cf1<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack.network.v2 import security_group
from openstack.tests.functional import base
class TestSecurityGroup(base.BaseFunctionalTest):
NAME = uuid.uuid4().hex
ID = None
@classmethod
def setUpClass(cls):
super(TestSecurityGroup, cls).setUpClass()
sot = cls.conn.network.create_security_group(name=cls.NAME)
assert isinstance(sot, security_group.SecurityGroup)
        assert sot.name == cls.NAME
cls.ID = sot.id
@classmethod
def tearDownClass(cls):
sot = cls.conn.network.delete_security_group(cls.ID,
ignore_missing=False)
        assert sot is None
def test_find(self):
sot = self.conn.network.find_security_group(self.NAME)
self.assertEqual(self.ID, sot.id)
def test_get(self):
sot = self.conn.network.get_security_group(self.ID)
self.assertEqual(self.NAME, sot.name)
self.assertEqual(self.ID, sot.id)
def test_list(self):
names = [o.name for o in self.conn.network.security_groups()]
self.assertIn(self.NAME, names)
|
|
14798847730a8746c1a7bad18a2f9e0fda7e0756
|
wagtail/tests/testapp/migrations/0006_sectionedrichtextpage_sectionedrichtextpagesection.py
|
wagtail/tests/testapp/migrations/0006_sectionedrichtextpage_sectionedrichtextpagesection.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0029_unicode_slugfield_dj19'),
('tests', '0005_customrichblockfieldpage_customrichtextfieldpage_defaultrichblockfieldpage_defaultrichtextfieldpage'),
]
operations = [
migrations.CreateModel(
name='SectionedRichTextPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, to='wagtailcore.Page', serialize=False, auto_created=True, primary_key=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SectionedRichTextPageSection',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField()),
('page', modelcluster.fields.ParentalKey(related_name='sections', to='tests.SectionedRichTextPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0028_merge'),
('tests', '0005_customrichblockfieldpage_customrichtextfieldpage_defaultrichblockfieldpage_defaultrichtextfieldpage'),
]
operations = [
migrations.CreateModel(
name='SectionedRichTextPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, to='wagtailcore.Page', serialize=False, auto_created=True, primary_key=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SectionedRichTextPageSection',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField()),
('page', modelcluster.fields.ParentalKey(related_name='sections', to='tests.SectionedRichTextPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
|
Fix test migration for Wagtail 1.5
|
Fix test migration for Wagtail 1.5
|
Python
|
bsd-3-clause
|
nilnvoid/wagtail,kurtrwall/wagtail,gasman/wagtail,zerolab/wagtail,kaedroho/wagtail,kurtrwall/wagtail,torchbox/wagtail,zerolab/wagtail,mikedingjan/wagtail,rsalmaso/wagtail,nilnvoid/wagtail,kaedroho/wagtail,nutztherookie/wagtail,takeflight/wagtail,gasman/wagtail,kaedroho/wagtail,wagtail/wagtail,jnns/wagtail,nimasmi/wagtail,FlipperPA/wagtail,kurtw/wagtail,zerolab/wagtail,mixxorz/wagtail,FlipperPA/wagtail,Toshakins/wagtail,wagtail/wagtail,takeflight/wagtail,timorieber/wagtail,chrxr/wagtail,kurtw/wagtail,mixxorz/wagtail,nimasmi/wagtail,iansprice/wagtail,kurtrwall/wagtail,Toshakins/wagtail,zerolab/wagtail,mikedingjan/wagtail,rsalmaso/wagtail,FlipperPA/wagtail,nealtodd/wagtail,gasman/wagtail,iansprice/wagtail,mixxorz/wagtail,chrxr/wagtail,zerolab/wagtail,Toshakins/wagtail,takeflight/wagtail,thenewguy/wagtail,nealtodd/wagtail,timorieber/wagtail,iansprice/wagtail,thenewguy/wagtail,mixxorz/wagtail,jnns/wagtail,wagtail/wagtail,nutztherookie/wagtail,thenewguy/wagtail,kurtw/wagtail,mikedingjan/wagtail,takeflight/wagtail,nutztherookie/wagtail,chrxr/wagtail,torchbox/wagtail,nealtodd/wagtail,timorieber/wagtail,nealtodd/wagtail,jnns/wagtail,kaedroho/wagtail,gasman/wagtail,nimasmi/wagtail,rsalmaso/wagtail,Toshakins/wagtail,wagtail/wagtail,kaedroho/wagtail,chrxr/wagtail,thenewguy/wagtail,rsalmaso/wagtail,torchbox/wagtail,timorieber/wagtail,nilnvoid/wagtail,FlipperPA/wagtail,mixxorz/wagtail,kurtw/wagtail,thenewguy/wagtail,nimasmi/wagtail,nutztherookie/wagtail,iansprice/wagtail,kurtrwall/wagtail,nilnvoid/wagtail,torchbox/wagtail,jnns/wagtail,mikedingjan/wagtail,wagtail/wagtail,gasman/wagtail,rsalmaso/wagtail
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0029_unicode_slugfield_dj19'),
('tests', '0005_customrichblockfieldpage_customrichtextfieldpage_defaultrichblockfieldpage_defaultrichtextfieldpage'),
]
operations = [
migrations.CreateModel(
name='SectionedRichTextPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, to='wagtailcore.Page', serialize=False, auto_created=True, primary_key=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SectionedRichTextPageSection',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField()),
('page', modelcluster.fields.ParentalKey(related_name='sections', to='tests.SectionedRichTextPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
Fix test migration for Wagtail 1.5
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0028_merge'),
('tests', '0005_customrichblockfieldpage_customrichtextfieldpage_defaultrichblockfieldpage_defaultrichtextfieldpage'),
]
operations = [
migrations.CreateModel(
name='SectionedRichTextPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, to='wagtailcore.Page', serialize=False, auto_created=True, primary_key=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SectionedRichTextPageSection',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField()),
('page', modelcluster.fields.ParentalKey(related_name='sections', to='tests.SectionedRichTextPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
|
<commit_before># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0029_unicode_slugfield_dj19'),
('tests', '0005_customrichblockfieldpage_customrichtextfieldpage_defaultrichblockfieldpage_defaultrichtextfieldpage'),
]
operations = [
migrations.CreateModel(
name='SectionedRichTextPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, to='wagtailcore.Page', serialize=False, auto_created=True, primary_key=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SectionedRichTextPageSection',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField()),
('page', modelcluster.fields.ParentalKey(related_name='sections', to='tests.SectionedRichTextPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
<commit_msg>Fix test migration for Wagtail 1.5<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0028_merge'),
('tests', '0005_customrichblockfieldpage_customrichtextfieldpage_defaultrichblockfieldpage_defaultrichtextfieldpage'),
]
operations = [
migrations.CreateModel(
name='SectionedRichTextPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, to='wagtailcore.Page', serialize=False, auto_created=True, primary_key=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SectionedRichTextPageSection',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField()),
('page', modelcluster.fields.ParentalKey(related_name='sections', to='tests.SectionedRichTextPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0029_unicode_slugfield_dj19'),
('tests', '0005_customrichblockfieldpage_customrichtextfieldpage_defaultrichblockfieldpage_defaultrichtextfieldpage'),
]
operations = [
migrations.CreateModel(
name='SectionedRichTextPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, to='wagtailcore.Page', serialize=False, auto_created=True, primary_key=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SectionedRichTextPageSection',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField()),
('page', modelcluster.fields.ParentalKey(related_name='sections', to='tests.SectionedRichTextPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
Fix test migration for Wagtail 1.5# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0028_merge'),
('tests', '0005_customrichblockfieldpage_customrichtextfieldpage_defaultrichblockfieldpage_defaultrichtextfieldpage'),
]
operations = [
migrations.CreateModel(
name='SectionedRichTextPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, to='wagtailcore.Page', serialize=False, auto_created=True, primary_key=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SectionedRichTextPageSection',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField()),
('page', modelcluster.fields.ParentalKey(related_name='sections', to='tests.SectionedRichTextPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
|
<commit_before># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0029_unicode_slugfield_dj19'),
('tests', '0005_customrichblockfieldpage_customrichtextfieldpage_defaultrichblockfieldpage_defaultrichtextfieldpage'),
]
operations = [
migrations.CreateModel(
name='SectionedRichTextPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, to='wagtailcore.Page', serialize=False, auto_created=True, primary_key=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SectionedRichTextPageSection',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField()),
('page', modelcluster.fields.ParentalKey(related_name='sections', to='tests.SectionedRichTextPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
<commit_msg>Fix test migration for Wagtail 1.5<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0028_merge'),
('tests', '0005_customrichblockfieldpage_customrichtextfieldpage_defaultrichblockfieldpage_defaultrichtextfieldpage'),
]
operations = [
migrations.CreateModel(
name='SectionedRichTextPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, to='wagtailcore.Page', serialize=False, auto_created=True, primary_key=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SectionedRichTextPageSection',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
('sort_order', models.IntegerField(editable=False, null=True, blank=True)),
('body', wagtail.wagtailcore.fields.RichTextField()),
('page', modelcluster.fields.ParentalKey(related_name='sections', to='tests.SectionedRichTextPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
|
9491469f5e34e96880e3e62bc26e742c65065a3f
|
tests/test_init.py
|
tests/test_init.py
|
# -*- coding: utf-8 -*-
# Copyright 2009-2016 Jason Stitt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import unittest
from tidylib import Tidy, PersistentTidy, tidy_document
class TestDocs1(unittest.TestCase):
def test_not_find_lib(self):
with self.assertRaises(OSError):
tidy = Tidy(lib_names=[])
|
Add test for initialization that doesn't find library
|
Add test for initialization that doesn't find library
|
Python
|
mit
|
countergram/pytidylib
|
Add test for initialization that doesn't find library
|
# -*- coding: utf-8 -*-
# Copyright 2009-2016 Jason Stitt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import unittest
from tidylib import Tidy, PersistentTidy, tidy_document
class TestDocs1(unittest.TestCase):
def test_not_find_lib(self):
with self.assertRaises(OSError):
tidy = Tidy(lib_names=[])
|
<commit_before><commit_msg>Add test for initialization that doesn't find library<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright 2009-2016 Jason Stitt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import unittest
from tidylib import Tidy, PersistentTidy, tidy_document
class TestDocs1(unittest.TestCase):
def test_not_find_lib(self):
with self.assertRaises(OSError):
tidy = Tidy(lib_names=[])
|
Add test for initialization that doesn't find library# -*- coding: utf-8 -*-
# Copyright 2009-2016 Jason Stitt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import unittest
from tidylib import Tidy, PersistentTidy, tidy_document
class TestDocs1(unittest.TestCase):
def test_not_find_lib(self):
with self.assertRaises(OSError):
tidy = Tidy(lib_names=[])
|
<commit_before><commit_msg>Add test for initialization that doesn't find library<commit_after># -*- coding: utf-8 -*-
# Copyright 2009-2016 Jason Stitt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
import unittest
from tidylib import Tidy, PersistentTidy, tidy_document
class TestDocs1(unittest.TestCase):
def test_not_find_lib(self):
with self.assertRaises(OSError):
tidy = Tidy(lib_names=[])
|
|
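An aside on the row above: the test passes lib_names=[] so that no candidate library name can resolve, which is why OSError is the expected failure. Below is a minimal sketch of the ctypes-style lookup such a constructor plausibly performs; the helper name load_tidy_library and the error message are hypothetical illustrations, not pytidylib's actual implementation.

import ctypes
import ctypes.util

def load_tidy_library(lib_names):
    # Try each candidate name in order; find_library returns None on a miss.
    for name in lib_names:
        path = ctypes.util.find_library(name)
        if path is not None:
            return ctypes.CDLL(path)
    # An empty lib_names list falls straight through to here.
    raise OSError("could not load libtidy under any of the names %r" % (lib_names,))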
834656cc94c13a5aa7f8360e2d399f0b623365f6
|
tests/test_init.py
|
tests/test_init.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
import click
from click.testing import CliRunner
import mock
import yaml
from tldr import cli
class TestInit(unittest.TestCase):
def setUp(self):
self.config_path = path.join(path.expanduser('~'), '.tldrrc')
if path.exists(self.config_path):
os.remove(self.config_path)
def tearDown(self):
if path.exists(self.config_path):
os.remove(self.config_path)
def test_init(self):
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
runner = CliRunner()
result = runner.invoke(cli.init)
assert result.output == 'Initializing the config file at ~/.tldrrc\n'
assert path.exists(self.config_path)
expected_config = {
'colors': {
'command': 'cyan',
'description': 'blue',
'usage': 'green'
},
'platform': 'linux',
'repo_directory': '/tmp/tldr'
}
with open(self.config_path) as f:
config = yaml.safe_load(f)
assert expected_config == config
|
Add test for `tldr init`
|
Add test for `tldr init`
|
Python
|
mit
|
lord63/tldr.py
|
Add test for `tldr init`
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
import click
from click.testing import CliRunner
import mock
import yaml
from tldr import cli
class TestInit(unittest.TestCase):
def setUp(self):
self.config_path = path.join(path.expanduser('~'), '.tldrrc')
if path.exists(self.config_path):
os.remove(self.config_path)
def tearDown(self):
if path.exists(self.config_path):
os.remove(self.config_path)
def test_init(self):
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
runner = CliRunner()
result = runner.invoke(cli.init)
assert result.output == 'Initializing the config file at ~/.tldrrc\n'
assert path.exists(self.config_path)
expected_config = {
'colors': {
'command': 'cyan',
'description': 'blue',
'usage': 'green'
},
'platform': 'linux',
'repo_directory': '/tmp/tldr'
}
with open(self.config_path) as f:
config = yaml.safe_load(f)
assert expected_config == config
|
<commit_before><commit_msg>Add test for `tldr init`<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
import click
from click.testing import CliRunner
import mock
import yaml
from tldr import cli
class TestInit(unittest.TestCase):
def setUp(self):
self.config_path = path.join(path.expanduser('~'), '.tldrrc')
if path.exists(self.config_path):
os.remove(self.config_path)
def tearDown(self):
if path.exists(self.config_path):
os.remove(self.config_path)
def test_init(self):
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
runner = CliRunner()
result = runner.invoke(cli.init)
assert result.output == 'Initializing the config file at ~/.tldrrc\n'
assert path.exists(self.config_path)
expected_config = {
'colors': {
'command': 'cyan',
'description': 'blue',
'usage': 'green'
},
'platform': 'linux',
'repo_directory': '/tmp/tldr'
}
with open(self.config_path) as f:
config = yaml.safe_load(f)
assert expected_config == config
|
Add test for `tldr init`#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
import click
from click.testing import CliRunner
import mock
import yaml
from tldr import cli
class TestInit(unittest.TestCase):
def setUp(self):
self.config_path = path.join(path.expanduser('~'), '.tldrrc')
if path.exists(self.config_path):
os.remove(self.config_path)
def tearDown(self):
if path.exists(self.config_path):
os.remove(self.config_path)
def test_init(self):
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
runner = CliRunner()
result = runner.invoke(cli.init)
assert result.output == 'Initializing the config file at ~/.tldrrc\n'
assert path.exists(self.config_path)
expected_config = {
'colors': {
'command': 'cyan',
'description': 'blue',
'usage': 'green'
},
'platform': 'linux',
'repo_directory': '/tmp/tldr'
}
with open(self.config_path) as f:
config = yaml.safe_load(f)
assert expected_config == config
|
<commit_before><commit_msg>Add test for `tldr init`<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from os import path
import unittest
import click
from click.testing import CliRunner
import mock
import yaml
from tldr import cli
class TestInit(unittest.TestCase):
def setUp(self):
self.config_path = path.join(path.expanduser('~'), '.tldrrc')
if path.exists(self.config_path):
os.remove(self.config_path)
def tearDown(self):
if path.exists(self.config_path):
os.remove(self.config_path)
def test_init(self):
with mock.patch('click.prompt', side_effect=['/tmp/tldr', 'linux']):
runner = CliRunner()
result = runner.invoke(cli.init)
assert result.output == 'Initializing the config file at ~/.tldrrc\n'
assert path.exists(self.config_path)
expected_config = {
'colors': {
'command': 'cyan',
'description': 'blue',
'usage': 'green'
},
'platform': 'linux',
'repo_directory': '/tmp/tldr'
}
with open(self.config_path) as f:
config = yaml.safe_load(f)
assert expected_config == config
|
|
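An aside on the row above: a sketch of an init command that would satisfy the test's assertions, assuming click and PyYAML exactly as the test imports them. The prompt wording and overall structure are invented; only the echoed message, the prompt order (repo directory first, then platform), and the config keys are taken from the test.

import os.path
import click
import yaml

@click.command()
def init():
    # The test patches click.prompt, so only this echo reaches result.output.
    click.echo('Initializing the config file at ~/.tldrrc')
    repo_directory = click.prompt('tldr repo directory')
    platform = click.prompt('your platform')
    config = {
        'colors': {'command': 'cyan', 'description': 'blue', 'usage': 'green'},
        'platform': platform,
        'repo_directory': repo_directory,
    }
    with open(os.path.join(os.path.expanduser('~'), '.tldrrc'), 'w') as f:
        yaml.safe_dump(config, f, default_flow_style=False)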
30334cd42056b5c4bf229798828ff010ec2a8e07
|
src/fastimgproto/fixtures/profiling.py
|
src/fastimgproto/fixtures/profiling.py
|
from fastimgproto.sourcefind.image import SourceFindImage
from memory_profiler import profile
@profile
def memprof_sourcefindimage(data, detection_n_sigma, analysis_n_sigma):
    return SourceFindImage(data=data, detection_n_sigma=detection_n_sigma, analysis_n_sigma=analysis_n_sigma)
|
Add handy profiled wrapper function
|
Add handy profiled wrapper function
|
Python
|
apache-2.0
|
SKA-ScienceDataProcessor/FastImaging-Python,SKA-ScienceDataProcessor/FastImaging-Python
|
Add handy profiled wrapper function
|
from fastimgproto.sourcefind.image import SourceFindImage
from memory_profiler import profile
@profile
def memprof_sourcefindimage(data, detection_n_sigma, analysis_n_sigma):
    return SourceFindImage(data=data, detection_n_sigma=detection_n_sigma, analysis_n_sigma=analysis_n_sigma)
|
<commit_before><commit_msg>Add handy profiled wrapper function<commit_after>
|
from fastimgproto.sourcefind.image import SourceFindImage
from memory_profiler import profile
@profile
def memprof_sourcefindimage(data, detection_n_sigma, analysis_n_sigma):
    return SourceFindImage(data=data, detection_n_sigma=detection_n_sigma, analysis_n_sigma=analysis_n_sigma)
|
Add handy profiled wrapper functionfrom fastimgproto.sourcefind.image import SourceFindImage
from memory_profiler import profile
@profile
def memprof_sourcefindimage(data, detection_n_sigma, analysis_n_sigma):
    return SourceFindImage(data=data, detection_n_sigma=detection_n_sigma, analysis_n_sigma=analysis_n_sigma)
|
<commit_before><commit_msg>Add handy profiled wrapper function<commit_after>from fastimgproto.sourcefind.image import SourceFindImage
from memory_profiler import profile
@profile
def memprof_sourcefindimage(data, detection_n_sigma, analysis_n_sigma):
    return SourceFindImage(data=data, detection_n_sigma=detection_n_sigma, analysis_n_sigma=analysis_n_sigma)
|
|
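An aside on the row above, as a hedged sketch rather than project documentation: memory_profiler's @profile decorator prints a line-by-line memory report when the decorated function returns, so a driver only needs to build an input array and call the wrapper. The image size and sigma thresholds below are illustrative only, and numpy is assumed to be installed alongside fastimgproto.

import numpy as np

# Illustrative input: Gaussian noise standing in for a dirty image.
data = np.random.randn(1024, 1024)
memprof_sourcefindimage(data, detection_n_sigma=5, analysis_n_sigma=3)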
fc51259760c522593218b83b8c10ce4cf3f239db
|
siphon/__init__.py
|
siphon/__init__.py
|
# Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from . import cdmr # noqa
__all__ = ['catalog', 'cdmr']
|
# Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__all__ = ['catalog', 'testing', 'util']
|
Remove cdmr main level import.
|
Remove cdmr main level import.
|
Python
|
bsd-3-clause
|
MoonRaker/siphon,hyoklee/siphon,Unidata/siphon,dopplershift/siphon,dopplershift/siphon
|
# Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from . import cdmr # noqa
__all__ = ['catalog', 'cdmr']
Remove cdmr main level import.
|
# Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__all__ = ['catalog', 'testing', 'util']
|
<commit_before># Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from . import cdmr # noqa
__all__ = ['catalog', 'cdmr']
<commit_msg>Remove cdmr main level import.<commit_after>
|
# Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__all__ = ['catalog', 'testing', 'util']
|
# Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from . import cdmr # noqa
__all__ = ['catalog', 'cdmr']
Remove cdmr main level import.# Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__all__ = ['catalog', 'testing', 'util']
|
<commit_before># Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from . import cdmr # noqa
__all__ = ['catalog', 'cdmr']
<commit_msg>Remove cdmr main level import.<commit_after># Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__all__ = ['catalog', 'testing', 'util']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.