commit stringlengths 40 40 | old_file stringlengths 4 118 | new_file stringlengths 4 118 | old_contents stringlengths 0 2.94k | new_contents stringlengths 1 4.43k | subject stringlengths 15 444 | message stringlengths 16 3.45k | lang stringclasses 1 value | license stringclasses 13 values | repos stringlengths 5 43.2k | prompt stringlengths 17 4.58k | response stringlengths 1 4.43k | prompt_tagged stringlengths 58 4.62k | response_tagged stringlengths 1 4.43k | text stringlengths 132 7.29k | text_tagged stringlengths 173 7.33k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
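The rows that follow are records from this table: each pairs commit metadata (hash, file paths, subject, message, language, license, repositories) with the file contents before and after the commit, plus derived prompt/response fields and text wrapped in `<commit_before>`/`<commit_msg>`/`<commit_after>` tags. As a minimal sketch of how such a table might be loaded and inspected (assuming a Hugging Face `datasets`-style layout with the columns listed above; the dataset path below is a placeholder, not the real identifier):

```python
# Minimal sketch, not an official loader: assumes the columns listed in the
# header above and a placeholder dataset path ("user/commit-edits").
from datasets import load_dataset

ds = load_dataset("user/commit-edits", split="train")

record = ds[0]
print(record["subject"])              # short commit subject line
print(record["old_contents"][:200])   # file contents before the commit (may be empty for new files)
print(record["new_contents"][:200])   # file contents after the commit
print(record["prompt_tagged"][:200])  # same data wrapped in <commit_before>/<commit_msg>/<commit_after> tags
```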
1786ebacb85b2ddce816fb21b80285d991761695
|
poyo/_nodes.py
|
poyo/_nodes.py
|
# -*- coding: utf-8 -*-
class TreeElement(object):
"""Helper class to identify internal classes."""
def __init__(self, **kwargs):
pass
class ContainerMixin(object):
"""Mixin that can hold TreeElement instances.
Containers can be called to return a dict representation.
"""
def __init__(self, **kwargs):
self._children = []
super(ContainerMixin, self).__init__(**kwargs)
def __iter__(self):
for c in self._children:
yield c
def __call__(self):
return {c.name: c() for c in self}
def add_child(self, child):
"""If the given object is an instance of Child add it to self and
register self as a parent.
"""
if not isinstance(child, ChildMixin):
raise TypeError(
'Requires instance of TreeElement. '
'Got {}'.format(type(child))
)
child.parent = self
self._children.append(child)
class ChildMixin(object):
"""Mixin that can be attached to Container object."""
def __init__(self, **kwargs):
parent = kwargs['parent']
if not isinstance(parent, ContainerMixin):
raise ValueError(
'Parent of ChildMixin instance needs to be a Container.'
)
parent.add_child(self)
super(ChildMixin, self).__init__(**kwargs)
class Root(ContainerMixin, TreeElement):
"""Pure Container class to represent the root of a YAML config."""
def __init__(self, **kwargs):
super(Root, self).__init__(**kwargs)
self.level = -1
class Section(ContainerMixin, ChildMixin, TreeElement):
"""Class that can act as a Child, but also as a Container."""
def __init__(self, name, level, **kwargs):
super(Section, self).__init__(**kwargs)
self.name = name
self.level = level
class Simple(ChildMixin, TreeElement):
"""Class that can solely be used as a Child, f.i. simple key value pairs
in a config.
"""
def __init__(self, name, level, value, **kwargs):
super(Simple, self).__init__(**kwargs)
self.name = name
self.level = level
self.value = value
def __call__(self):
return self.value
|
Implement classes to be used by the deserializer
|
Implement classes to be used by the deserializer
|
Python
|
mit
|
hackebrot/poyo
|
Implement classes to be used by the deserializer
|
# -*- coding: utf-8 -*-
class TreeElement(object):
"""Helper class to identify internal classes."""
def __init__(self, **kwargs):
pass
class ContainerMixin(object):
"""Mixin that can hold TreeElement instances.
Containers can be called to return a dict representation.
"""
def __init__(self, **kwargs):
self._children = []
super(ContainerMixin, self).__init__(**kwargs)
def __iter__(self):
for c in self._children:
yield c
def __call__(self):
return {c.name: c() for c in self}
def add_child(self, child):
"""If the given object is an instance of Child add it to self and
register self as a parent.
"""
if not isinstance(child, ChildMixin):
raise TypeError(
'Requires instance of TreeElement. '
'Got {}'.format(type(child))
)
child.parent = self
self._children.append(child)
class ChildMixin(object):
"""Mixin that can be attached to Container object."""
def __init__(self, **kwargs):
parent = kwargs['parent']
if not isinstance(parent, ContainerMixin):
raise ValueError(
'Parent of ChildMixin instance needs to be a Container.'
)
parent.add_child(self)
super(ChildMixin, self).__init__(**kwargs)
class Root(ContainerMixin, TreeElement):
"""Pure Container class to represent the root of a YAML config."""
def __init__(self, **kwargs):
super(Root, self).__init__(**kwargs)
self.level = -1
class Section(ContainerMixin, ChildMixin, TreeElement):
"""Class that can act as a Child, but also as a Container."""
def __init__(self, name, level, **kwargs):
super(Section, self).__init__(**kwargs)
self.name = name
self.level = level
class Simple(ChildMixin, TreeElement):
"""Class that can solely be used as a Child, f.i. simple key value pairs
in a config.
"""
def __init__(self, name, level, value, **kwargs):
super(Simple, self).__init__(**kwargs)
self.name = name
self.level = level
self.value = value
def __call__(self):
return self.value
|
<commit_before><commit_msg>Implement classes to be used by the deserializer<commit_after>
|
# -*- coding: utf-8 -*-
class TreeElement(object):
"""Helper class to identify internal classes."""
def __init__(self, **kwargs):
pass
class ContainerMixin(object):
"""Mixin that can hold TreeElement instances.
Containers can be called to return a dict representation.
"""
def __init__(self, **kwargs):
self._children = []
super(ContainerMixin, self).__init__(**kwargs)
def __iter__(self):
for c in self._children:
yield c
def __call__(self):
return {c.name: c() for c in self}
def add_child(self, child):
"""If the given object is an instance of Child add it to self and
register self as a parent.
"""
if not isinstance(child, ChildMixin):
raise TypeError(
'Requires instance of TreeElement. '
'Got {}'.format(type(child))
)
child.parent = self
self._children.append(child)
class ChildMixin(object):
"""Mixin that can be attached to Container object."""
def __init__(self, **kwargs):
parent = kwargs['parent']
if not isinstance(parent, ContainerMixin):
raise ValueError(
'Parent of ChildMixin instance needs to be a Container.'
)
parent.add_child(self)
super(ChildMixin, self).__init__(**kwargs)
class Root(ContainerMixin, TreeElement):
"""Pure Container class to represent the root of a YAML config."""
def __init__(self, **kwargs):
super(Root, self).__init__(**kwargs)
self.level = -1
class Section(ContainerMixin, ChildMixin, TreeElement):
"""Class that can act as a Child, but also as a Container."""
def __init__(self, name, level, **kwargs):
super(Section, self).__init__(**kwargs)
self.name = name
self.level = level
class Simple(ChildMixin, TreeElement):
"""Class that can solely be used as a Child, f.i. simple key value pairs
in a config.
"""
def __init__(self, name, level, value, **kwargs):
super(Simple, self).__init__(**kwargs)
self.name = name
self.level = level
self.value = value
def __call__(self):
return self.value
|
Implement classes to be used by the deserializer# -*- coding: utf-8 -*-
class TreeElement(object):
"""Helper class to identify internal classes."""
def __init__(self, **kwargs):
pass
class ContainerMixin(object):
"""Mixin that can hold TreeElement instances.
Containers can be called to return a dict representation.
"""
def __init__(self, **kwargs):
self._children = []
super(ContainerMixin, self).__init__(**kwargs)
def __iter__(self):
for c in self._children:
yield c
def __call__(self):
return {c.name: c() for c in self}
def add_child(self, child):
"""If the given object is an instance of Child add it to self and
register self as a parent.
"""
if not isinstance(child, ChildMixin):
raise TypeError(
'Requires instance of TreeElement. '
'Got {}'.format(type(child))
)
child.parent = self
self._children.append(child)
class ChildMixin(object):
"""Mixin that can be attached to Container object."""
def __init__(self, **kwargs):
parent = kwargs['parent']
if not isinstance(parent, ContainerMixin):
raise ValueError(
'Parent of ChildMixin instance needs to be a Container.'
)
parent.add_child(self)
super(ChildMixin, self).__init__(**kwargs)
class Root(ContainerMixin, TreeElement):
"""Pure Container class to represent the root of a YAML config."""
def __init__(self, **kwargs):
super(Root, self).__init__(**kwargs)
self.level = -1
class Section(ContainerMixin, ChildMixin, TreeElement):
"""Class that can act as a Child, but also as a Container."""
def __init__(self, name, level, **kwargs):
super(Section, self).__init__(**kwargs)
self.name = name
self.level = level
class Simple(ChildMixin, TreeElement):
"""Class that can solely be used as a Child, f.i. simple key value pairs
in a config.
"""
def __init__(self, name, level, value, **kwargs):
super(Simple, self).__init__(**kwargs)
self.name = name
self.level = level
self.value = value
def __call__(self):
return self.value
|
<commit_before><commit_msg>Implement classes to be used by the deserializer<commit_after># -*- coding: utf-8 -*-
class TreeElement(object):
"""Helper class to identify internal classes."""
def __init__(self, **kwargs):
pass
class ContainerMixin(object):
"""Mixin that can hold TreeElement instances.
Containers can be called to return a dict representation.
"""
def __init__(self, **kwargs):
self._children = []
super(ContainerMixin, self).__init__(**kwargs)
def __iter__(self):
for c in self._children:
yield c
def __call__(self):
return {c.name: c() for c in self}
def add_child(self, child):
"""If the given object is an instance of Child add it to self and
register self as a parent.
"""
if not isinstance(child, ChildMixin):
raise TypeError(
'Requires instance of TreeElement. '
'Got {}'.format(type(child))
)
child.parent = self
self._children.append(child)
class ChildMixin(object):
"""Mixin that can be attached to Container object."""
def __init__(self, **kwargs):
parent = kwargs['parent']
if not isinstance(parent, ContainerMixin):
raise ValueError(
'Parent of ChildMixin instance needs to be a Container.'
)
parent.add_child(self)
super(ChildMixin, self).__init__(**kwargs)
class Root(ContainerMixin, TreeElement):
"""Pure Container class to represent the root of a YAML config."""
def __init__(self, **kwargs):
super(Root, self).__init__(**kwargs)
self.level = -1
class Section(ContainerMixin, ChildMixin, TreeElement):
"""Class that can act as a Child, but also as a Container."""
def __init__(self, name, level, **kwargs):
super(Section, self).__init__(**kwargs)
self.name = name
self.level = level
class Simple(ChildMixin, TreeElement):
"""Class that can solely be used as a Child, f.i. simple key value pairs
in a config.
"""
def __init__(self, name, level, value, **kwargs):
super(Simple, self).__init__(**kwargs)
self.name = name
self.level = level
self.value = value
def __call__(self):
return self.value
|
|
d187c51ccd9dc1676b6f16eddecee6dce752d668
|
distarray/tests/test_client.py
|
distarray/tests/test_client.py
|
import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestClient(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def testCreateDAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def testCreateDACwithTargets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestDistArrayContext(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def test_create_DAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def test_create_DAC_with_targets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
Make class test-class name more specific
|
Make class test-class name more specific
... to make room for more client tests.
|
Python
|
bsd-3-clause
|
RaoUmer/distarray,enthought/distarray,RaoUmer/distarray,enthought/distarray
|
import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestClient(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def testCreateDAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def testCreateDACwithTargets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
Make class test-class name more specific
... to make room for more client tests.
|
import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestDistArrayContext(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def test_create_DAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def test_create_DAC_with_targets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
<commit_before>import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestClient(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def testCreateDAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def testCreateDACwithTargets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
<commit_msg>Make class test-class name more specific
... to make room for more client tests.<commit_after>
|
import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestDistArrayContext(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def test_create_DAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def test_create_DAC_with_targets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestClient(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def testCreateDAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def testCreateDACwithTargets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
Make class test-class name more specific
... to make room for more client tests.import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestDistArrayContext(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def test_create_DAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def test_create_DAC_with_targets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
<commit_before>import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestClient(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def testCreateDAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def testCreateDACwithTargets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
<commit_msg>Make class test-class name more specific
... to make room for more client tests.<commit_after>import unittest
from IPython.parallel import Client
from distarray.client import DistArrayContext
class TestDistArrayContext(unittest.TestCase):
def setUp(self):
self.client = Client()
self.dv = self.client[:]
def test_create_DAC(self):
'''Can we create a plain vanilla context?'''
dac = DistArrayContext(self.dv)
self.assertIs(dac.view, self.dv)
def test_create_DAC_with_targets(self):
'''Can we create a context with a subset of engines?'''
dac = DistArrayContext(self.dv, targets=[0, 1])
self.assertIs(dac.view, self.dv)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
28944376472130d53a05f7473e7213c917207cd4
|
apartments/models.py
|
apartments/models.py
|
from sqlalchemy import create_engine, Column, DateTime, Float, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Listing(Base):
__tablename__ = 'listings'
id = Column(Integer, primary_key=True)
craigslist_id = Column(Integer, unique=True)
name = Column(String)
price = Column(Float)
link = Column(String, unique=True)
created = Column(DateTime)
area = Column(String)
geotag = Column(String)
lat = Column(Float)
lon = Column(Float)
location = Column(String)
nearest_stop = Column(String)
def __repr__(self):
return f'<Listing(name={self.name}, price={self.price}, craigslist_id={self.craigslist_id})>'
engine = create_engine('sqlite:///apartments.db')
Base.metadata.create_all(engine)
|
Add model representing a listing
|
Add model representing a listing
|
Python
|
mit
|
rlucioni/craigbot,rlucioni/craigbot,rlucioni/apartments
|
Add model representing a listing
|
from sqlalchemy import create_engine, Column, DateTime, Float, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Listing(Base):
__tablename__ = 'listings'
id = Column(Integer, primary_key=True)
craigslist_id = Column(Integer, unique=True)
name = Column(String)
price = Column(Float)
link = Column(String, unique=True)
created = Column(DateTime)
area = Column(String)
geotag = Column(String)
lat = Column(Float)
lon = Column(Float)
location = Column(String)
nearest_stop = Column(String)
def __repr__(self):
return f'<Listing(name={self.name}, price={self.price}, craigslist_id={self.craigslist_id})>'
engine = create_engine('sqlite:///apartments.db')
Base.metadata.create_all(engine)
|
<commit_before><commit_msg>Add model representing a listing<commit_after>
|
from sqlalchemy import create_engine, Column, DateTime, Float, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Listing(Base):
__tablename__ = 'listings'
id = Column(Integer, primary_key=True)
craigslist_id = Column(Integer, unique=True)
name = Column(String)
price = Column(Float)
link = Column(String, unique=True)
created = Column(DateTime)
area = Column(String)
geotag = Column(String)
lat = Column(Float)
lon = Column(Float)
location = Column(String)
nearest_stop = Column(String)
def __repr__(self):
return f'<Listing(name={self.name}, price={self.price}, craigslist_id={self.craigslist_id})>'
engine = create_engine('sqlite:///apartments.db')
Base.metadata.create_all(engine)
|
Add model representing a listingfrom sqlalchemy import create_engine, Column, DateTime, Float, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Listing(Base):
__tablename__ = 'listings'
id = Column(Integer, primary_key=True)
craigslist_id = Column(Integer, unique=True)
name = Column(String)
price = Column(Float)
link = Column(String, unique=True)
created = Column(DateTime)
area = Column(String)
geotag = Column(String)
lat = Column(Float)
lon = Column(Float)
location = Column(String)
nearest_stop = Column(String)
def __repr__(self):
return f'<Listing(name={self.name}, price={self.price}, craigslist_id={self.craigslist_id})>'
engine = create_engine('sqlite:///apartments.db')
Base.metadata.create_all(engine)
|
<commit_before><commit_msg>Add model representing a listing<commit_after>from sqlalchemy import create_engine, Column, DateTime, Float, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Listing(Base):
__tablename__ = 'listings'
id = Column(Integer, primary_key=True)
craigslist_id = Column(Integer, unique=True)
name = Column(String)
price = Column(Float)
link = Column(String, unique=True)
created = Column(DateTime)
area = Column(String)
geotag = Column(String)
lat = Column(Float)
lon = Column(Float)
location = Column(String)
nearest_stop = Column(String)
def __repr__(self):
return f'<Listing(name={self.name}, price={self.price}, craigslist_id={self.craigslist_id})>'
engine = create_engine('sqlite:///apartments.db')
Base.metadata.create_all(engine)
|
|
38cbc73f70a9ca896a29d7fa2e000388bbf40d88
|
DilipadTopicModelling/experiment_get_results.py
|
DilipadTopicModelling/experiment_get_results.py
|
import logging
import os
import pandas as pd
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# select experiment to get parameters from
nTopics = 100
start = 80
end = 199
alpha = 50.0/nTopics
beta = 0.02
nIter = 200
# load corpus
data_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/'
corpus = CPTCorpus.load('{}corpus.json'.format(data_dir),
topicDict='{}/topicDict.dict'.format(data_dir),
opinionDict='{}/opinionDict.dict'.format(data_dir))
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'.format(nTopics)
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter, alpha=alpha,
beta=beta, beta_o=beta, out_dir=out_dir)
sampler._initialize()
sampler.estimate_parameters(start=start, end=end)
pd.DataFrame(sampler.theta).to_csv(os.path.join(out_dir, 'theta_{}.csv'.
format(nTopics)))
topics = sampler.topics_to_df(phi=sampler.topics, words=corpus.topic_words())
topics.to_csv(os.path.join(out_dir, 'topics_{}.csv'.format(nTopics)))
for i, p in enumerate(sampler.corpus.perspectives):
opinions = sampler.topics_to_df(phi=sampler.opinions[i],
words=corpus.opinion_words())
opinions.to_csv(os.path.join(out_dir,
'opinions_{}_{}.csv'.format(p.name, nTopics)))
|
Add script to generate data from an experiment
|
Add script to generate data from an experiment
Added a script that generates csv files of estimations of theta,
phi_topic and phi_opinion and saves them to disk.
|
Python
|
apache-2.0
|
NLeSC/cptm,NLeSC/cptm
|
Add script to generate data from an experiment
Added a script that generates csv files of estimations of theta,
phi_topic and phi_opinion and saves them to disk.
|
import logging
import os
import pandas as pd
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# select experiment to get parameters from
nTopics = 100
start = 80
end = 199
alpha = 50.0/nTopics
beta = 0.02
nIter = 200
# load corpus
data_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/'
corpus = CPTCorpus.load('{}corpus.json'.format(data_dir),
topicDict='{}/topicDict.dict'.format(data_dir),
opinionDict='{}/opinionDict.dict'.format(data_dir))
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'.format(nTopics)
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter, alpha=alpha,
beta=beta, beta_o=beta, out_dir=out_dir)
sampler._initialize()
sampler.estimate_parameters(start=start, end=end)
pd.DataFrame(sampler.theta).to_csv(os.path.join(out_dir, 'theta_{}.csv'.
format(nTopics)))
topics = sampler.topics_to_df(phi=sampler.topics, words=corpus.topic_words())
topics.to_csv(os.path.join(out_dir, 'topics_{}.csv'.format(nTopics)))
for i, p in enumerate(sampler.corpus.perspectives):
opinions = sampler.topics_to_df(phi=sampler.opinions[i],
words=corpus.opinion_words())
opinions.to_csv(os.path.join(out_dir,
'opinions_{}_{}.csv'.format(p.name, nTopics)))
|
<commit_before><commit_msg>Add script to generate data from an experiment
Added a script that generates csv files of estimations of theta,
phi_topic and phi_opinion and saves them to disk.<commit_after>
|
import logging
import os
import pandas as pd
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# select experiment to get parameters from
nTopics = 100
start = 80
end = 199
alpha = 50.0/nTopics
beta = 0.02
nIter = 200
# load corpus
data_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/'
corpus = CPTCorpus.load('{}corpus.json'.format(data_dir),
topicDict='{}/topicDict.dict'.format(data_dir),
opinionDict='{}/opinionDict.dict'.format(data_dir))
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'.format(nTopics)
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter, alpha=alpha,
beta=beta, beta_o=beta, out_dir=out_dir)
sampler._initialize()
sampler.estimate_parameters(start=start, end=end)
pd.DataFrame(sampler.theta).to_csv(os.path.join(out_dir, 'theta_{}.csv'.
format(nTopics)))
topics = sampler.topics_to_df(phi=sampler.topics, words=corpus.topic_words())
topics.to_csv(os.path.join(out_dir, 'topics_{}.csv'.format(nTopics)))
for i, p in enumerate(sampler.corpus.perspectives):
opinions = sampler.topics_to_df(phi=sampler.opinions[i],
words=corpus.opinion_words())
opinions.to_csv(os.path.join(out_dir,
'opinions_{}_{}.csv'.format(p.name, nTopics)))
|
Add script to generate data from an experiment
Added a script that generates csv files of estimations of theta,
phi_topic and phi_opinion and saves them to disk.import logging
import os
import pandas as pd
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# select experiment to get parameters from
nTopics = 100
start = 80
end = 199
alpha = 50.0/nTopics
beta = 0.02
nIter = 200
# load corpus
data_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/'
corpus = CPTCorpus.load('{}corpus.json'.format(data_dir),
topicDict='{}/topicDict.dict'.format(data_dir),
opinionDict='{}/opinionDict.dict'.format(data_dir))
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'.format(nTopics)
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter, alpha=alpha,
beta=beta, beta_o=beta, out_dir=out_dir)
sampler._initialize()
sampler.estimate_parameters(start=start, end=end)
pd.DataFrame(sampler.theta).to_csv(os.path.join(out_dir, 'theta_{}.csv'.
format(nTopics)))
topics = sampler.topics_to_df(phi=sampler.topics, words=corpus.topic_words())
topics.to_csv(os.path.join(out_dir, 'topics_{}.csv'.format(nTopics)))
for i, p in enumerate(sampler.corpus.perspectives):
opinions = sampler.topics_to_df(phi=sampler.opinions[i],
words=corpus.opinion_words())
opinions.to_csv(os.path.join(out_dir,
'opinions_{}_{}.csv'.format(p.name, nTopics)))
|
<commit_before><commit_msg>Add script to generate data from an experiment
Added a script that generates csv files of estimations of theta,
phi_topic and phi_opinion and saves them to disk.<commit_after>import logging
import os
import pandas as pd
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
# select experiment to get parameters from
nTopics = 100
start = 80
end = 199
alpha = 50.0/nTopics
beta = 0.02
nIter = 200
# load corpus
data_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/'
corpus = CPTCorpus.load('{}corpus.json'.format(data_dir),
topicDict='{}/topicDict.dict'.format(data_dir),
opinionDict='{}/opinionDict.dict'.format(data_dir))
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'.format(nTopics)
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter, alpha=alpha,
beta=beta, beta_o=beta, out_dir=out_dir)
sampler._initialize()
sampler.estimate_parameters(start=start, end=end)
pd.DataFrame(sampler.theta).to_csv(os.path.join(out_dir, 'theta_{}.csv'.
format(nTopics)))
topics = sampler.topics_to_df(phi=sampler.topics, words=corpus.topic_words())
topics.to_csv(os.path.join(out_dir, 'topics_{}.csv'.format(nTopics)))
for i, p in enumerate(sampler.corpus.perspectives):
opinions = sampler.topics_to_df(phi=sampler.opinions[i],
words=corpus.opinion_words())
opinions.to_csv(os.path.join(out_dir,
'opinions_{}_{}.csv'.format(p.name, nTopics)))
|
|
656d94c0375f6a96cc3a9d4b3227d8f19afe3dea
|
control/systems/main.py
|
control/systems/main.py
|
import numpy as np
Kt = 1.41/89.0
Kv = 5840.0/3.0
G = 10.0
J = 4.0*(2.54**2.0)/2.0 # 4 kg on a 1 inch pulley
R = 12.0/89.0
A = np.asarray([[0, 1],
[0, -(Kt*Kv)/((G**2)*J*R)]])
B = np.asarray([[0],
[Kt/(G*J*R)]])
|
Add lemon drop elevator model
|
Add lemon drop elevator model
|
Python
|
mit
|
WesleyAC/toybox,WesleyAC/toybox,WesleyAC/toybox,WesleyAC/toybox,WesleyAC/toybox
|
Add lemon drop elevator model
|
import numpy as np
Kt = 1.41/89.0
Kv = 5840.0/3.0
G = 10.0
J = 4.0*(2.54**2.0)/2.0 # 4 kg on a 1 inch pulley
R = 12.0/89.0
A = np.asarray([[0, 1],
[0, -(Kt*Kv)/((G**2)*J*R)]])
B = np.asarray([[0],
[Kt/(G*J*R)]])
|
<commit_before><commit_msg>Add lemon drop elevator model<commit_after>
|
import numpy as np
Kt = 1.41/89.0
Kv = 5840.0/3.0
G = 10.0
J = 4.0*(2.54**2.0)/2.0 # 4 kg on a 1 inch pulley
R = 12.0/89.0
A = np.asarray([[0, 1],
[0, -(Kt*Kv)/((G**2)*J*R)]])
B = np.asarray([[0],
[Kt/(G*J*R)]])
|
Add lemon drop elevator modelimport numpy as np
Kt = 1.41/89.0
Kv = 5840.0/3.0
G = 10.0
J = 4.0*(2.54**2.0)/2.0 # 4 kg on a 1 inch pulley
R = 12.0/89.0
A = np.asarray([[0, 1],
[0, -(Kt*Kv)/((G**2)*J*R)]])
B = np.asarray([[0],
[Kt/(G*J*R)]])
|
<commit_before><commit_msg>Add lemon drop elevator model<commit_after>import numpy as np
Kt = 1.41/89.0
Kv = 5840.0/3.0
G = 10.0
J = 4.0*(2.54**2.0)/2.0 # 4 kg on a 1 inch pulley
R = 12.0/89.0
A = np.asarray([[0, 1],
[0, -(Kt*Kv)/((G**2)*J*R)]])
B = np.asarray([[0],
[Kt/(G*J*R)]])
|
|
3fd4244dbfd33bbf2fa369d81756e82b1cf1c467
|
src/mmw/apps/modeling/migrations/0041_clear_nlcd2019_gwlfe_results.py
|
src/mmw/apps/modeling/migrations/0041_clear_nlcd2019_gwlfe_results.py
|
# Generated by Django 3.2.13 on 2022-10-17 13:47
from django.db import migrations
def clear_nlcd2019_gwlfe_results(apps, schema_editor):
"""
Clear the results for all scenarios belonging to GWLF-E projects made after
the release of 1.33.0, which had incorrectly aligned NLCD19 2019 on
2022-01-17:
https://github.com/WikiWatershed/model-my-watershed/releases/tag/1.33.0
These results will be recalculated with the correctly aligned NLCD19 values
when these projects are accessed again.
"""
Project = apps.get_model('modeling', 'Project')
Scenario = apps.get_model('modeling', 'Scenario')
Project.objects.filter(
model_package='gwlfe',
created_at__gte='2022-01-17',
).update(
gis_data=None,
mapshed_job_uuid=None,
subbasin_mapshed_job_uuid=None,
)
Scenario.objects.filter(
project__model_package='gwlfe',
project__created_at__gte='2022-01-17',
).update(
results='[]',
modification_hash='',
)
class Migration(migrations.Migration):
dependencies = [
('modeling', '0040_clear_nlcd2019_tr55_results'),
]
operations = [
migrations.RunPython(clear_nlcd2019_gwlfe_results),
]
|
Clear out unaligned NLCD19 GWLF-E results
|
Clear out unaligned NLCD19 GWLF-E results
Adds a migration that clears out all stored results for GWLF-E
projects created on or after 2022-01-17, which is when 1.33.0
was released with incorrectly aligned NLCD19 layers, which had
also been made the default. Thus, every project made after then
had slightly incorrect results.
This migration clears out those bad results. The next time that
project is loaded in the UI, all its results will be recalculated.
This allows us to rebuild the results over time, and not overload
the system all at once.
|
Python
|
apache-2.0
|
WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed
|
Clear out unaligned NLCD19 GWLF-E results
Adds a migration that clears out all stored results for GWLF-E
projects created on or after 2022-01-17, which is when 1.33.0
was released with incorrectly aligned NLCD19 layers, which had
also been made the default. Thus, every project made after then
had slightly incorrect results.
This migration clears out those bad results. The next time that
project is loaded in the UI, all its results will be recalculated.
This allows us to rebuild the results over time, and not overload
the system all at once.
|
# Generated by Django 3.2.13 on 2022-10-17 13:47
from django.db import migrations
def clear_nlcd2019_gwlfe_results(apps, schema_editor):
"""
Clear the results for all scenarios belonging to GWLF-E projects made after
the release of 1.33.0, which had incorrectly aligned NLCD19 2019 on
2022-01-17:
https://github.com/WikiWatershed/model-my-watershed/releases/tag/1.33.0
These results will be recalculated with the correctly aligned NLCD19 values
when these projects are accessed again.
"""
Project = apps.get_model('modeling', 'Project')
Scenario = apps.get_model('modeling', 'Scenario')
Project.objects.filter(
model_package='gwlfe',
created_at__gte='2022-01-17',
).update(
gis_data=None,
mapshed_job_uuid=None,
subbasin_mapshed_job_uuid=None,
)
Scenario.objects.filter(
project__model_package='gwlfe',
project__created_at__gte='2022-01-17',
).update(
results='[]',
modification_hash='',
)
class Migration(migrations.Migration):
dependencies = [
('modeling', '0040_clear_nlcd2019_tr55_results'),
]
operations = [
migrations.RunPython(clear_nlcd2019_gwlfe_results),
]
|
<commit_before><commit_msg>Clear out unaligned NLCD19 GWLF-E results
Adds a migration that clears out all stored results for GWLF-E
projects created on or after 2022-01-17, which is when 1.33.0
was released with incorrectly aligned NLCD19 layers, which had
also been made the default. Thus, every project made after then
had slightly incorrect results.
This migration clears out those bad results. The next time that
project is loaded in the UI, all its results will be recalculated.
This allows us to rebuild the results over time, and not overload
the system all at once.<commit_after>
|
# Generated by Django 3.2.13 on 2022-10-17 13:47
from django.db import migrations
def clear_nlcd2019_gwlfe_results(apps, schema_editor):
"""
Clear the results for all scenarios belonging to GWLF-E projects made after
the release of 1.33.0, which had incorrectly aligned NLCD19 2019 on
2022-01-17:
https://github.com/WikiWatershed/model-my-watershed/releases/tag/1.33.0
These results will be recalculated with the correctly aligned NLCD19 values
when these projects are accessed again.
"""
Project = apps.get_model('modeling', 'Project')
Scenario = apps.get_model('modeling', 'Scenario')
Project.objects.filter(
model_package='gwlfe',
created_at__gte='2022-01-17',
).update(
gis_data=None,
mapshed_job_uuid=None,
subbasin_mapshed_job_uuid=None,
)
Scenario.objects.filter(
project__model_package='gwlfe',
project__created_at__gte='2022-01-17',
).update(
results='[]',
modification_hash='',
)
class Migration(migrations.Migration):
dependencies = [
('modeling', '0040_clear_nlcd2019_tr55_results'),
]
operations = [
migrations.RunPython(clear_nlcd2019_gwlfe_results),
]
|
Clear out unaligned NLCD19 GWLF-E results
Adds a migration that clears out all stored results for GWLF-E
projects created on or after 2022-01-17, which is when 1.33.0
was released with incorrectly aligned NLCD19 layers, which had
also been made the default. Thus, every project made after then
had slightly incorrect results.
This migration clears out those bad results. The next time that
project is loaded in the UI, all its results will be recalculated.
This allows us to rebuild the results over time, and not overload
the system all at once.# Generated by Django 3.2.13 on 2022-10-17 13:47
from django.db import migrations
def clear_nlcd2019_gwlfe_results(apps, schema_editor):
"""
Clear the results for all scenarios belonging to GWLF-E projects made after
the release of 1.33.0, which had incorrectly aligned NLCD19 2019 on
2022-01-17:
https://github.com/WikiWatershed/model-my-watershed/releases/tag/1.33.0
These results will be recalculated with the correctly aligned NLCD19 values
when these projects are accessed again.
"""
Project = apps.get_model('modeling', 'Project')
Scenario = apps.get_model('modeling', 'Scenario')
Project.objects.filter(
model_package='gwlfe',
created_at__gte='2022-01-17',
).update(
gis_data=None,
mapshed_job_uuid=None,
subbasin_mapshed_job_uuid=None,
)
Scenario.objects.filter(
project__model_package='gwlfe',
project__created_at__gte='2022-01-17',
).update(
results='[]',
modification_hash='',
)
class Migration(migrations.Migration):
dependencies = [
('modeling', '0040_clear_nlcd2019_tr55_results'),
]
operations = [
migrations.RunPython(clear_nlcd2019_gwlfe_results),
]
|
<commit_before><commit_msg>Clear out unaligned NLCD19 GWLF-E results
Adds a migration that clears out all stored results for GWLF-E
projects created on or after 2022-01-17, which is when 1.33.0
was released with incorrectly aligned NLCD19 layers, which had
also been made the default. Thus, every project made after then
had slightly incorrect results.
This migration clears out those bad results. The next time that
project is loaded in the UI, all its results will be recalculated.
This allows us to rebuild the results over time, and not overload
the system all at once.<commit_after># Generated by Django 3.2.13 on 2022-10-17 13:47
from django.db import migrations
def clear_nlcd2019_gwlfe_results(apps, schema_editor):
"""
Clear the results for all scenarios belonging to GWLF-E projects made after
the release of 1.33.0, which had incorrectly aligned NLCD19 2019 on
2022-01-17:
https://github.com/WikiWatershed/model-my-watershed/releases/tag/1.33.0
These results will be recalculated with the correctly aligned NLCD19 values
when these projects are accessed again.
"""
Project = apps.get_model('modeling', 'Project')
Scenario = apps.get_model('modeling', 'Scenario')
Project.objects.filter(
model_package='gwlfe',
created_at__gte='2022-01-17',
).update(
gis_data=None,
mapshed_job_uuid=None,
subbasin_mapshed_job_uuid=None,
)
Scenario.objects.filter(
project__model_package='gwlfe',
project__created_at__gte='2022-01-17',
).update(
results='[]',
modification_hash='',
)
class Migration(migrations.Migration):
dependencies = [
('modeling', '0040_clear_nlcd2019_tr55_results'),
]
operations = [
migrations.RunPython(clear_nlcd2019_gwlfe_results),
]
|
|
2cb7e09df0a8ec6fda707cccd1e9f8f00e15083c
|
migrations/versions/9ef49beab95_.py
|
migrations/versions/9ef49beab95_.py
|
"""empty message
Revision ID: 9ef49beab95
Revises: 4b7b5a7ddc5c
Create Date: 2016-02-07 15:00:45.614000
"""
# revision identifiers, used by Alembic.
revision = '9ef49beab95'
down_revision = '4b7b5a7ddc5c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
|
Adjust the sorting of restaurants.
|
Adjust the sorting of restaurants.
|
Python
|
mit
|
clementlefevre/hunger-game,clementlefevre/hunger-game,clementlefevre/hunger-game
|
Adjust the sorting of restaurants.
|
"""empty message
Revision ID: 9ef49beab95
Revises: 4b7b5a7ddc5c
Create Date: 2016-02-07 15:00:45.614000
"""
# revision identifiers, used by Alembic.
revision = '9ef49beab95'
down_revision = '4b7b5a7ddc5c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
|
<commit_before><commit_msg>Adjust the sorting of restaurants.<commit_after>
|
"""empty message
Revision ID: 9ef49beab95
Revises: 4b7b5a7ddc5c
Create Date: 2016-02-07 15:00:45.614000
"""
# revision identifiers, used by Alembic.
revision = '9ef49beab95'
down_revision = '4b7b5a7ddc5c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
|
Adjust the sorting of restaurants."""empty message
Revision ID: 9ef49beab95
Revises: 4b7b5a7ddc5c
Create Date: 2016-02-07 15:00:45.614000
"""
# revision identifiers, used by Alembic.
revision = '9ef49beab95'
down_revision = '4b7b5a7ddc5c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
|
<commit_before><commit_msg>Adjust the sorting of restaurants.<commit_after>"""empty message
Revision ID: 9ef49beab95
Revises: 4b7b5a7ddc5c
Create Date: 2016-02-07 15:00:45.614000
"""
# revision identifiers, used by Alembic.
revision = '9ef49beab95'
down_revision = '4b7b5a7ddc5c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
|
|
ed20a93e917cfdddc5cd49cc6446b6e80fb4573d
|
makam/migrations/0007_auto_20150812_1615.py
|
makam/migrations/0007_auto_20150812_1615.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('makam', '0006_auto_20150727_1631'),
]
operations = [
migrations.AlterField(
model_name='symbtr',
name='uuid',
field=django_extensions.db.fields.UUIDField(max_length=36, editable=False, blank=True),
),
migrations.RunSQL('alter table makam_symbtr alter COLUMN uuid type uuid USING ("uuid"::uuid)'),
migrations.AlterField(
model_name='symbtr',
name='uuid',
field=models.UUIDField(db_index=True),
),
]
|
Migrate symbtr uuid field to django type
|
Migrate symbtr uuid field to django type
|
Python
|
agpl-3.0
|
MTG/dunya,MTG/dunya,MTG/dunya,MTG/dunya
|
Migrate symbtr uuid field to django type
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('makam', '0006_auto_20150727_1631'),
]
operations = [
migrations.AlterField(
model_name='symbtr',
name='uuid',
field=django_extensions.db.fields.UUIDField(max_length=36, editable=False, blank=True),
),
migrations.RunSQL('alter table makam_symbtr alter COLUMN uuid type uuid USING ("uuid"::uuid)'),
migrations.AlterField(
model_name='symbtr',
name='uuid',
field=models.UUIDField(db_index=True),
),
]
|
<commit_before><commit_msg>Migrate symbtr uuid field to django type<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('makam', '0006_auto_20150727_1631'),
]
operations = [
migrations.AlterField(
model_name='symbtr',
name='uuid',
field=django_extensions.db.fields.UUIDField(max_length=36, editable=False, blank=True),
),
migrations.RunSQL('alter table makam_symbtr alter COLUMN uuid type uuid USING ("uuid"::uuid)'),
migrations.AlterField(
model_name='symbtr',
name='uuid',
field=models.UUIDField(db_index=True),
),
]
|
Migrate symbtr uuid field to django type# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('makam', '0006_auto_20150727_1631'),
]
operations = [
migrations.AlterField(
model_name='symbtr',
name='uuid',
field=django_extensions.db.fields.UUIDField(max_length=36, editable=False, blank=True),
),
migrations.RunSQL('alter table makam_symbtr alter COLUMN uuid type uuid USING ("uuid"::uuid)'),
migrations.AlterField(
model_name='symbtr',
name='uuid',
field=models.UUIDField(db_index=True),
),
]
|
<commit_before><commit_msg>Migrate symbtr uuid field to django type<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('makam', '0006_auto_20150727_1631'),
]
operations = [
migrations.AlterField(
model_name='symbtr',
name='uuid',
field=django_extensions.db.fields.UUIDField(max_length=36, editable=False, blank=True),
),
migrations.RunSQL('alter table makam_symbtr alter COLUMN uuid type uuid USING ("uuid"::uuid)'),
migrations.AlterField(
model_name='symbtr',
name='uuid',
field=models.UUIDField(db_index=True),
),
]
|
|
52c50ca6e4c5d2ee75300617c5da118fb1136e76
|
mplstyles/plots.py
|
mplstyles/plots.py
|
from matplotlib import cm
import matplotlib.pyplot as plt
from mplstyles import cmap as colormap
import numpy as np
def contour_image(x,y,Z,cmap=None,vmax=None,vmin=None,interpolation='nearest',contour_labelsize=9,contour_opts={},imshow_opts={},clegendlabels=[],label=False):
ax = plt.gca()
x_delta = float((x[-1]-x[0]))/(len(x)-1)/2.
y_delta = float((y[-1]-y[0]))/(len(y)-1)/2.
extent=(x[0],x[-1],y[0],y[-1])
extent_delta = (x[0]-x_delta,x[-1]+x_delta,y[0]-y_delta,y[-1]+y_delta)
ax.set_xlim(x[0],x[-1])
ax.set_ylim(y[0],y[-1])
if cmap is None:
cmap = colormap.reverse(cm.Blues)
Z = Z.transpose()
#plt.contourf(X,Y,self.pdata,interpolation=interpolation)
cs = ax.imshow(Z,interpolation=interpolation,origin='lower',aspect='auto',extent=extent_delta,cmap=cmap,vmax=vmax,vmin=vmin, **imshow_opts)
# Draw contours
X, Y = np.meshgrid(x, y)
CS = ax.contour(X, Y, Z, extent=extent, origin='lower', **contour_opts )
# Label contours
if label:
ax.clabel(CS, fontsize=contour_labelsize)
# Show contours in legend if desired
if len(clegendlabels) > 0:
for i in range(len(clegendlabels)):
CS.collections[i].set_label(clegendlabels[i])
#ax.legend()
return cs, CS
|
Add custom plot style contour_image.
|
Add custom plot style contour_image.
|
Python
|
mit
|
matthewwardrop/python-mplstyles,matthewwardrop/python-mplkit,matthewwardrop/python-mplstyles,matthewwardrop/python-mplkit
|
Add custom plot style contour_image.
|
from matplotlib import cm
import matplotlib.pyplot as plt
from mplstyles import cmap as colormap
import numpy as np
def contour_image(x,y,Z,cmap=None,vmax=None,vmin=None,interpolation='nearest',contour_labelsize=9,contour_opts={},imshow_opts={},clegendlabels=[],label=False):
ax = plt.gca()
x_delta = float((x[-1]-x[0]))/(len(x)-1)/2.
y_delta = float((y[-1]-y[0]))/(len(y)-1)/2.
extent=(x[0],x[-1],y[0],y[-1])
extent_delta = (x[0]-x_delta,x[-1]+x_delta,y[0]-y_delta,y[-1]+y_delta)
ax.set_xlim(x[0],x[-1])
ax.set_ylim(y[0],y[-1])
if cmap is None:
cmap = colormap.reverse(cm.Blues)
Z = Z.transpose()
#plt.contourf(X,Y,self.pdata,interpolation=interpolation)
cs = ax.imshow(Z,interpolation=interpolation,origin='lower',aspect='auto',extent=extent_delta,cmap=cmap,vmax=vmax,vmin=vmin, **imshow_opts)
# Draw contours
X, Y = np.meshgrid(x, y)
CS = ax.contour(X, Y, Z, extent=extent, origin='lower', **contour_opts )
# Label contours
if label:
ax.clabel(CS, fontsize=contour_labelsize)
# Show contours in legend if desired
if len(clegendlabels) > 0:
for i in range(len(clegendlabels)):
CS.collections[i].set_label(clegendlabels[i])
#ax.legend()
return cs, CS
|
<commit_before><commit_msg>Add custom plot style contour_image.<commit_after>
|
from matplotlib import cm
import matplotlib.pyplot as plt
from mplstyles import cmap as colormap
import numpy as np
def contour_image(x,y,Z,cmap=None,vmax=None,vmin=None,interpolation='nearest',contour_labelsize=9,contour_opts={},imshow_opts={},clegendlabels=[],label=False):
ax = plt.gca()
x_delta = float((x[-1]-x[0]))/(len(x)-1)/2.
y_delta = float((y[-1]-y[0]))/(len(y)-1)/2.
extent=(x[0],x[-1],y[0],y[-1])
extent_delta = (x[0]-x_delta,x[-1]+x_delta,y[0]-y_delta,y[-1]+y_delta)
ax.set_xlim(x[0],x[-1])
ax.set_ylim(y[0],y[-1])
if cmap is None:
cmap = colormap.reverse(cm.Blues)
Z = Z.transpose()
#plt.contourf(X,Y,self.pdata,interpolation=interpolation)
cs = ax.imshow(Z,interpolation=interpolation,origin='lower',aspect='auto',extent=extent_delta,cmap=cmap,vmax=vmax,vmin=vmin, **imshow_opts)
# Draw contours
X, Y = np.meshgrid(x, y)
CS = ax.contour(X, Y, Z, extent=extent, origin='lower', **contour_opts )
# Label contours
if label:
ax.clabel(CS, fontsize=contour_labelsize)
# Show contours in legend if desired
if len(clegendlabels) > 0:
for i in range(len(clegendlabels)):
CS.collections[i].set_label(clegendlabels[i])
#ax.legend()
return cs, CS
|
Add custom plot style contour_image.from matplotlib import cm
import matplotlib.pyplot as plt
from mplstyles import cmap as colormap
import numpy as np
def contour_image(x,y,Z,cmap=None,vmax=None,vmin=None,interpolation='nearest',contour_labelsize=9,contour_opts={},imshow_opts={},clegendlabels=[],label=False):
ax = plt.gca()
x_delta = float((x[-1]-x[0]))/(len(x)-1)/2.
y_delta = float((y[-1]-y[0]))/(len(y)-1)/2.
extent=(x[0],x[-1],y[0],y[-1])
extent_delta = (x[0]-x_delta,x[-1]+x_delta,y[0]-y_delta,y[-1]+y_delta)
ax.set_xlim(x[0],x[-1])
ax.set_ylim(y[0],y[-1])
if cmap is None:
cmap = colormap.reverse(cm.Blues)
Z = Z.transpose()
#plt.contourf(X,Y,self.pdata,interpolation=interpolation)
cs = ax.imshow(Z,interpolation=interpolation,origin='lower',aspect='auto',extent=extent_delta,cmap=cmap,vmax=vmax,vmin=vmin, **imshow_opts)
# Draw contours
X, Y = np.meshgrid(x, y)
CS = ax.contour(X, Y, Z, extent=extent, origin='lower', **contour_opts )
# Label contours
if label:
ax.clabel(CS, fontsize=contour_labelsize)
# Show contours in legend if desired
if len(clegendlabels) > 0:
for i in range(len(clegendlabels)):
CS.collections[i].set_label(clegendlabels[i])
#ax.legend()
return cs, CS
|
<commit_before><commit_msg>Add custom plot style contour_image.<commit_after>from matplotlib import cm
import matplotlib.pyplot as plt
from mplstyles import cmap as colormap
import numpy as np
def contour_image(x,y,Z,cmap=None,vmax=None,vmin=None,interpolation='nearest',contour_labelsize=9,contour_opts={},imshow_opts={},clegendlabels=[],label=False):
ax = plt.gca()
x_delta = float((x[-1]-x[0]))/(len(x)-1)/2.
y_delta = float((y[-1]-y[0]))/(len(y)-1)/2.
extent=(x[0],x[-1],y[0],y[-1])
extent_delta = (x[0]-x_delta,x[-1]+x_delta,y[0]-y_delta,y[-1]+y_delta)
ax.set_xlim(x[0],x[-1])
ax.set_ylim(y[0],y[-1])
if cmap is None:
cmap = colormap.reverse(cm.Blues)
Z = Z.transpose()
#plt.contourf(X,Y,self.pdata,interpolation=interpolation)
cs = ax.imshow(Z,interpolation=interpolation,origin='lower',aspect='auto',extent=extent_delta,cmap=cmap,vmax=vmax,vmin=vmin, **imshow_opts)
# Draw contours
X, Y = np.meshgrid(x, y)
CS = ax.contour(X, Y, Z, extent=extent, origin='lower', **contour_opts )
# Label contours
if label:
ax.clabel(CS, fontsize=contour_labelsize)
# Show contours in legend if desired
if len(clegendlabels) > 0:
for i in range(len(clegendlabels)):
CS.collections[i].set_label(clegendlabels[i])
#ax.legend()
return cs, CS
|
|
45b789010409e4e2e2afc88cb776c8b70e7768ec
|
dakota/tests/test_dakota_base.py
|
dakota/tests/test_dakota_base.py
|
#!/usr/bin/env python
#
# Tests for dakota.dakota_base module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
import os
import filecmp
from nose.tools import *
from dakota.dakota_base import DakotaBase
# Fixtures -------------------------------------------------------------
def setup_module():
"""Called before any tests are performed."""
print('\n*** DakotaBase tests')
def teardown_module():
"""Called after all tests have completed."""
pass
# Tests ----------------------------------------------------------------
@raises(TypeError)
def test_instantiate():
"""Test whether DakotaBase fails to instantiate."""
d = DakotaBase()
|
Add unit test for DakotaBase
|
Add unit test for DakotaBase
Still need to determine how to test its non-abstract methods.
|
Python
|
mit
|
csdms/dakota,csdms/dakota
|
Add unit test for DakotaBase
Still need to determine how to test its non-abstract methods.
|
#!/usr/bin/env python
#
# Tests for dakota.dakota_base module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
import os
import filecmp
from nose.tools import *
from dakota.dakota_base import DakotaBase
# Fixtures -------------------------------------------------------------
def setup_module():
"""Called before any tests are performed."""
print('\n*** DakotaBase tests')
def teardown_module():
"""Called after all tests have completed."""
pass
# Tests ----------------------------------------------------------------
@raises(TypeError)
def test_instantiate():
"""Test whether DakotaBase fails to instantiate."""
d = DakotaBase()
|
<commit_before><commit_msg>Add unit test for DakotaBase
Still need to determine how to test its non-abstract methods.<commit_after>
|
#!/usr/bin/env python
#
# Tests for dakota.dakota_base module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
import os
import filecmp
from nose.tools import *
from dakota.dakota_base import DakotaBase
# Fixtures -------------------------------------------------------------
def setup_module():
"""Called before any tests are performed."""
print('\n*** DakotaBase tests')
def teardown_module():
"""Called after all tests have completed."""
pass
# Tests ----------------------------------------------------------------
@raises(TypeError)
def test_instantiate():
"""Test whether DakotaBase fails to instantiate."""
d = DakotaBase()
|
Add unit test for DakotaBase
Still need to determine how to test its non-abstract methods.#!/usr/bin/env python
#
# Tests for dakota.dakota_base module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
import os
import filecmp
from nose.tools import *
from dakota.dakota_base import DakotaBase
# Fixtures -------------------------------------------------------------
def setup_module():
"""Called before any tests are performed."""
print('\n*** DakotaBase tests')
def teardown_module():
"""Called after all tests have completed."""
pass
# Tests ----------------------------------------------------------------
@raises(TypeError)
def test_instantiate():
"""Test whether DakotaBase fails to instantiate."""
d = DakotaBase()
|
<commit_before><commit_msg>Add unit test for DakotaBase
Still need to determine how to test its non-abstract methods.<commit_after>#!/usr/bin/env python
#
# Tests for dakota.dakota_base module.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
import os
import filecmp
from nose.tools import *
from dakota.dakota_base import DakotaBase
# Fixtures -------------------------------------------------------------
def setup_module():
"""Called before any tests are performed."""
print('\n*** DakotaBase tests')
def teardown_module():
"""Called after all tests have completed."""
pass
# Tests ----------------------------------------------------------------
@raises(TypeError)
def test_instantiate():
"""Test whether DakotaBase fails to instantiate."""
d = DakotaBase()
|
|
0ca7d4a20c8a65e45ddb7c61ca72c0e6c464a80e
|
migrations/versions/0296_template_redacted_fix.py
|
migrations/versions/0296_template_redacted_fix.py
|
"""
Revision ID: 0296_template_redacted_fix
Revises: 0295_api_key_constraint
Create Date: 2019-06-07 17:02:14.350064
"""
from alembic import op
revision = '0296_template_redacted_fix'
down_revision = '0295_api_key_constraint'
def upgrade():
op.execute("""
INSERT INTO template_redacted (template_id, redact_personalisation, updated_at, updated_by_id)
SELECT templates.id, FALSE, now(), templates.created_by_id
FROM templates
WHERE templates.id NOT IN (SELECT template_id FROM template_redacted WHERE template_id = templates.id)
;
""")
def downgrade():
pass
|
Create template_redacted entry for templates created by migration
|
Create template_redacted entry for templates created by migration
So that we can edit those templates
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Create template_redacted entry for templates created by migration
So that we can edit those templates
|
"""
Revision ID: 0296_template_redacted_fix
Revises: 0295_api_key_constraint
Create Date: 2019-06-07 17:02:14.350064
"""
from alembic import op
revision = '0296_template_redacted_fix'
down_revision = '0295_api_key_constraint'
def upgrade():
op.execute("""
INSERT INTO template_redacted (template_id, redact_personalisation, updated_at, updated_by_id)
SELECT templates.id, FALSE, now(), templates.created_by_id
FROM templates
WHERE templates.id NOT IN (SELECT template_id FROM template_redacted WHERE template_id = templates.id)
;
""")
def downgrade():
pass
|
<commit_before><commit_msg>Create template_redacted entry for templates created by migration
So that we can edit those templates<commit_after>
|
"""
Revision ID: 0296_template_redacted_fix
Revises: 0295_api_key_constraint
Create Date: 2019-06-07 17:02:14.350064
"""
from alembic import op
revision = '0296_template_redacted_fix'
down_revision = '0295_api_key_constraint'
def upgrade():
op.execute("""
INSERT INTO template_redacted (template_id, redact_personalisation, updated_at, updated_by_id)
SELECT templates.id, FALSE, now(), templates.created_by_id
FROM templates
WHERE templates.id NOT IN (SELECT template_id FROM template_redacted WHERE template_id = templates.id)
;
""")
def downgrade():
pass
|
Create template_redacted entry for templates created by migration
So that we can edit those templates"""
Revision ID: 0296_template_redacted_fix
Revises: 0295_api_key_constraint
Create Date: 2019-06-07 17:02:14.350064
"""
from alembic import op
revision = '0296_template_redacted_fix'
down_revision = '0295_api_key_constraint'
def upgrade():
op.execute("""
INSERT INTO template_redacted (template_id, redact_personalisation, updated_at, updated_by_id)
SELECT templates.id, FALSE, now(), templates.created_by_id
FROM templates
WHERE templates.id NOT IN (SELECT template_id FROM template_redacted WHERE template_id = templates.id)
;
""")
def downgrade():
pass
|
<commit_before><commit_msg>Create template_redacted entry for templates created by migration
So that we can edit those templates<commit_after>"""
Revision ID: 0296_template_redacted_fix
Revises: 0295_api_key_constraint
Create Date: 2019-06-07 17:02:14.350064
"""
from alembic import op
revision = '0296_template_redacted_fix'
down_revision = '0295_api_key_constraint'
def upgrade():
op.execute("""
INSERT INTO template_redacted (template_id, redact_personalisation, updated_at, updated_by_id)
SELECT templates.id, FALSE, now(), templates.created_by_id
FROM templates
WHERE templates.id NOT IN (SELECT template_id FROM template_redacted WHERE template_id = templates.id)
;
""")
def downgrade():
pass
|
|
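A side note on the backfill above: the NOT IN plus correlated-subquery form works, but the same insert is often written with NOT EXISTS, which avoids NOT IN's surprising NULL semantics. This is a sketch only, assuming the same alembic setup and a PostgreSQL dialect as in the migration above:

from alembic import op


def upgrade():
    # Equivalent backfill expressed with NOT EXISTS instead of NOT IN.
    op.execute("""
        INSERT INTO template_redacted (template_id, redact_personalisation, updated_at, updated_by_id)
        SELECT t.id, FALSE, now(), t.created_by_id
        FROM templates t
        WHERE NOT EXISTS (SELECT 1 FROM template_redacted tr WHERE tr.template_id = t.id)
    """)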
2611476df6f362cd59e4aad38a243fc8f6cbf8a8
|
devincachu/purger.py
|
devincachu/purger.py
|
# -*- coding: utf-8 -*-
import roan
from django.contrib.flatpages import models
from palestras import models as pmodels
def connect():
flatpages = models.FlatPage.objects.all()
for f in flatpages:
roan.purge(f.url).on_save(models.FlatPage)
palestras = pmodels.Palestra.objects.all()
for p in palestras:
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestra)
|
# -*- coding: utf-8 -*-
import roan
from django.contrib.flatpages import models
from palestras import models as pmodels
def connect():
flatpages = models.FlatPage.objects.all()
for f in flatpages:
roan.purge(f.url).on_save(models.FlatPage)
palestras = pmodels.Palestra.objects.all()
for p in palestras:
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestrante)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestrante)
|
Purge the talk page when a speaker is saved
|
Purge the talk page when a speaker is saved
|
Python
|
bsd-2-clause
|
devincachu/devincachu-2013,devincachu/devincachu-2013,devincachu/devincachu-2014,devincachu/devincachu-2014,devincachu/devincachu-2014,devincachu/devincachu-2013,devincachu/devincachu-2013
|
# -*- coding: utf-8 -*-
import roan
from django.contrib.flatpages import models
from palestras import models as pmodels
def connect():
flatpages = models.FlatPage.objects.all()
for f in flatpages:
roan.purge(f.url).on_save(models.FlatPage)
palestras = pmodels.Palestra.objects.all()
for p in palestras:
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestra)
Purge the talk page when a speaker is saved
|
# -*- coding: utf-8 -*-
import roan
from django.contrib.flatpages import models
from palestras import models as pmodels
def connect():
flatpages = models.FlatPage.objects.all()
for f in flatpages:
roan.purge(f.url).on_save(models.FlatPage)
palestras = pmodels.Palestra.objects.all()
for p in palestras:
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestrante)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestrante)
|
<commit_before># -*- coding: utf-8 -*-
import roan
from django.contrib.flatpages import models
from palestras import models as pmodels
def connect():
flatpages = models.FlatPage.objects.all()
for f in flatpages:
roan.purge(f.url).on_save(models.FlatPage)
palestras = pmodels.Palestra.objects.all()
for p in palestras:
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestra)
<commit_msg>Purge the talk page when a speaker is saved<commit_after>
|
# -*- coding: utf-8 -*-
import roan
from django.contrib.flatpages import models
from palestras import models as pmodels
def connect():
flatpages = models.FlatPage.objects.all()
for f in flatpages:
roan.purge(f.url).on_save(models.FlatPage)
palestras = pmodels.Palestra.objects.all()
for p in palestras:
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestrante)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestrante)
|
# -*- coding: utf-8 -*-
import roan
from django.contrib.flatpages import models
from palestras import models as pmodels
def connect():
flatpages = models.FlatPage.objects.all()
for f in flatpages:
roan.purge(f.url).on_save(models.FlatPage)
palestras = pmodels.Palestra.objects.all()
for p in palestras:
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestra)
Purge the talk page when a speaker is saved# -*- coding: utf-8 -*-
import roan
from django.contrib.flatpages import models
from palestras import models as pmodels
def connect():
flatpages = models.FlatPage.objects.all()
for f in flatpages:
roan.purge(f.url).on_save(models.FlatPage)
palestras = pmodels.Palestra.objects.all()
for p in palestras:
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestrante)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestrante)
|
<commit_before># -*- coding: utf-8 -*-
import roan
from django.contrib.flatpages import models
from palestras import models as pmodels
def connect():
flatpages = models.FlatPage.objects.all()
for f in flatpages:
roan.purge(f.url).on_save(models.FlatPage)
palestras = pmodels.Palestra.objects.all()
for p in palestras:
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestra)
<commit_msg>Purge the talk page when a speaker is saved<commit_after># -*- coding: utf-8 -*-
import roan
from django.contrib.flatpages import models
from palestras import models as pmodels
def connect():
flatpages = models.FlatPage.objects.all()
for f in flatpages:
roan.purge(f.url).on_save(models.FlatPage)
palestras = pmodels.Palestra.objects.all()
for p in palestras:
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestra)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_save(pmodels.Palestrante)
roan.purge(p.get_absolute_url_and_link_title()['url']).on_delete(pmodels.Palestrante)
|
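A small readability note on the hook registration above: get_absolute_url_and_link_title() is evaluated four times per palestra. Below is a sketch with the URL computed once per object, assuming the same roan and palestras modules used in this commit:

# -*- coding: utf-8 -*-
import roan

from palestras import models as pmodels


def connect_palestras():
    # Compute each URL once, then register save/delete purges for both models.
    for p in pmodels.Palestra.objects.all():
        url = p.get_absolute_url_and_link_title()['url']
        for model in (pmodels.Palestra, pmodels.Palestrante):
            roan.purge(url).on_save(model)
            roan.purge(url).on_delete(model)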
cdd1f3410b8ae304485f7992ac6048e1277cffe1
|
parsedatetime/pdt_locales/__init__.py
|
parsedatetime/pdt_locales/__init__.py
|
# -*- encoding: utf-8 -*-
"""
pdt_locales
All of the included locale classes shipped with pdt.
"""
try:
import PyICU as pyicu
except:
pyicu = None
def lcase(x):
return x.lower()
from .base import pdtLocale_base, pdtLocale_icu
from .de_DE import *
from .en_AU import *
from .en_US import *
from .es import *
from .nl_NL import *
from .pt_BR import *
from .ru_RU import *
|
# -*- encoding: utf-8 -*-
"""
pdt_locales
All of the included locale classes shipped with pdt.
"""
import os
try:
import PyICU as pyicu
except:
pyicu = None
import yaml
def lcase(x):
return x.lower()
from .base import pdtLocale_base, pdtLocale_icu
from .de_DE import *
from .en_AU import *
from .en_US import *
from .es import *
from .nl_NL import *
from .pt_BR import *
from .ru_RU import *
pdtLocales = [
'icu',
'en_US',
'en_AU',
'es_ES',
'de_DE',
'nl_NL',
'ru_RU',
]
def load_yaml(path):
"""
Read yaml data from filepath
:param path:
:return:
"""
with open(path, 'r') as fio:
return yaml.load(fio.read())
def _get_yaml_path(locale):
"""
Return filepath of locale file
:param locale:
:return:
"""
return os.path.join(os.path.dirname(__file__), '%s.yaml' % locale)
def load_locale(locale):
"""
Return data of locale
:param locale:
:return:
"""
assert locale in pdtLocales, "The locale '%s' is not supported" % locale
_data_base = load_yaml(_get_yaml_path('base'))
return _data_base.update(**load_yaml(_get_yaml_path(locale)))
load_locale('ru_RU')
|
Add local locale from file
|
Add local locale from file
|
Python
|
apache-2.0
|
phoebebright/parsedatetime,bear/parsedatetime,idpaterson/parsedatetime
|
# -*- encoding: utf-8 -*-
"""
pdt_locales
All of the included locale classes shipped with pdt.
"""
try:
import PyICU as pyicu
except:
pyicu = None
def lcase(x):
return x.lower()
from .base import pdtLocale_base, pdtLocale_icu
from .de_DE import *
from .en_AU import *
from .en_US import *
from .es import *
from .nl_NL import *
from .pt_BR import *
from .ru_RU import *
Add local locale from file
|
# -*- encoding: utf-8 -*-
"""
pdt_locales
All of the included locale classes shipped with pdt.
"""
import os
try:
import PyICU as pyicu
except:
pyicu = None
import yaml
def lcase(x):
return x.lower()
from .base import pdtLocale_base, pdtLocale_icu
from .de_DE import *
from .en_AU import *
from .en_US import *
from .es import *
from .nl_NL import *
from .pt_BR import *
from .ru_RU import *
pdtLocales = [
'icu',
'en_US',
'en_AU',
'es_ES',
'de_DE',
'nl_NL',
'ru_RU',
]
def load_yaml(path):
"""
Read yaml data from filepath
:param path:
:return:
"""
with open(path, 'r') as fio:
return yaml.load(fio.read())
def _get_yaml_path(locale):
"""
Return filepath of locale file
:param locale:
:return:
"""
return os.path.join(os.path.dirname(__file__), '%s.yaml' % locale)
def load_locale(locale):
"""
Return data of locale
:param locale:
:return:
"""
assert locale in pdtLocales, "The locale '%s' is not supported" % locale
_data_base = load_yaml(_get_yaml_path('base'))
return _data_base.update(**load_yaml(_get_yaml_path(locale)))
load_locale('ru_RU')
|
<commit_before># -*- encoding: utf-8 -*-
"""
pdt_locales
All of the included locale classes shipped with pdt.
"""
try:
import PyICU as pyicu
except:
pyicu = None
def lcase(x):
return x.lower()
from .base import pdtLocale_base, pdtLocale_icu
from .de_DE import *
from .en_AU import *
from .en_US import *
from .es import *
from .nl_NL import *
from .pt_BR import *
from .ru_RU import *
<commit_msg>Add local locale from file<commit_after>
|
# -*- encoding: utf-8 -*-
"""
pdt_locales
All of the included locale classes shipped with pdt.
"""
import os
try:
import PyICU as pyicu
except:
pyicu = None
import yaml
def lcase(x):
return x.lower()
from .base import pdtLocale_base, pdtLocale_icu
from .de_DE import *
from .en_AU import *
from .en_US import *
from .es import *
from .nl_NL import *
from .pt_BR import *
from .ru_RU import *
pdtLocales = [
'icu',
'en_US',
'en_AU',
'es_ES',
'de_DE',
'nl_NL',
'ru_RU',
]
def load_yaml(path):
"""
Read yaml data from filepath
:param path:
:return:
"""
with open(path, 'r') as fio:
return yaml.load(fio.read())
def _get_yaml_path(locale):
"""
Return filepath of locale file
:param locale:
:return:
"""
return os.path.join(os.path.dirname(__file__), '%s.yaml' % locale)
def load_locale(locale):
"""
Return data of locale
:param locale:
:return:
"""
assert locale in pdtLocales, "The locale '%s' is not supported" % locale
_data_base = load_yaml(_get_yaml_path('base'))
return _data_base.update(**load_yaml(_get_yaml_path(locale)))
load_locale('ru_RU')
|
# -*- encoding: utf-8 -*-
"""
pdt_locales
All of the included locale classes shipped with pdt.
"""
try:
import PyICU as pyicu
except:
pyicu = None
def lcase(x):
return x.lower()
from .base import pdtLocale_base, pdtLocale_icu
from .de_DE import *
from .en_AU import *
from .en_US import *
from .es import *
from .nl_NL import *
from .pt_BR import *
from .ru_RU import *
Add local locale from file# -*- encoding: utf-8 -*-
"""
pdt_locales
All of the included locale classes shipped with pdt.
"""
import os
try:
import PyICU as pyicu
except:
pyicu = None
import yaml
def lcase(x):
return x.lower()
from .base import pdtLocale_base, pdtLocale_icu
from .de_DE import *
from .en_AU import *
from .en_US import *
from .es import *
from .nl_NL import *
from .pt_BR import *
from .ru_RU import *
pdtLocales = [
'icu',
'en_US',
'en_AU',
'es_ES',
'de_DE',
'nl_NL',
'ru_RU',
]
def load_yaml(path):
"""
Read yaml data from filepath
:param path:
:return:
"""
with open(path, 'r') as fio:
return yaml.load(fio.read())
def _get_yaml_path(locale):
"""
Return filepath of locale file
:param locale:
:return:
"""
return os.path.join(os.path.dirname(__file__), '%s.yaml' % locale)
def load_locale(locale):
"""
Return data of locale
:param locale:
:return:
"""
assert locale in pdtLocales, "The locale '%s' is not supported" % locale
_data_base = load_yaml(_get_yaml_path('base'))
return _data_base.update(**load_yaml(_get_yaml_path(locale)))
load_locale('ru_RU')
|
<commit_before># -*- encoding: utf-8 -*-
"""
pdt_locales
All of the included locale classes shipped with pdt.
"""
try:
import PyICU as pyicu
except:
pyicu = None
def lcase(x):
return x.lower()
from .base import pdtLocale_base, pdtLocale_icu
from .de_DE import *
from .en_AU import *
from .en_US import *
from .es import *
from .nl_NL import *
from .pt_BR import *
from .ru_RU import *
<commit_msg>Add local locale from file<commit_after># -*- encoding: utf-8 -*-
"""
pdt_locales
All of the included locale classes shipped with pdt.
"""
import os
try:
import PyICU as pyicu
except:
pyicu = None
import yaml
def lcase(x):
return x.lower()
from .base import pdtLocale_base, pdtLocale_icu
from .de_DE import *
from .en_AU import *
from .en_US import *
from .es import *
from .nl_NL import *
from .pt_BR import *
from .ru_RU import *
pdtLocales = [
'icu',
'en_US',
'en_AU',
'es_ES',
'de_DE',
'nl_NL',
'ru_RU',
]
def load_yaml(path):
"""
Read yaml data from filepath
:param path:
:return:
"""
with open(path, 'r') as fio:
return yaml.load(fio.read())
def _get_yaml_path(locale):
"""
Return filepath of locale file
:param locale:
:return:
"""
return os.path.join(os.path.dirname(__file__), '%s.yaml' % locale)
def load_locale(locale):
"""
Return data of locale
:param locale:
:return:
"""
assert locale in pdtLocales, "The locale '%s' is not supported" % locale
_data_base = load_yaml(_get_yaml_path('base'))
return _data_base.update(**load_yaml(_get_yaml_path(locale)))
load_locale('ru_RU')
|
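One detail worth flagging in the new load_locale above: dict.update() returns None, so the function returns None rather than the merged locale data, and load_locale('ru_RU') also runs at import time. Here is a sketch of the presumably intended behaviour, reusing the helpers defined in this commit:

def load_locale(locale):
    """Return the base locale data with locale-specific overrides applied."""
    assert locale in pdtLocales, "The locale '%s' is not supported" % locale
    data = load_yaml(_get_yaml_path('base'))
    data.update(load_yaml(_get_yaml_path(locale)))
    return data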
b3889f8ff6d66963d4253d6796c3bb20dc9adbb7
|
scripts/my_Param.py
|
scripts/my_Param.py
|
#=================================================
# Observation
#-------------------------------------------------
sstObsPath = '/clim_obs/obs/ocn/mo/tos/UKMETOFFICE-HadISST-v1-1/130122_HadISST_sst.nc'
tauxObsPath = '/clim_obs/obs/atm/mo/tauu/ERAINT/tauu_ERAINT_198901-200911.nc'
sstNameObs = 'sst'
tauxNameObs = 'tauu'
#=================================================
# Models
#-------------------------------------------------
modpath = '/work/cmip5/historical/atm/mo/VAR/cmip5.MOD.historical.r1i1p1.mo.atm.Amon.VAR.ver-1.latestX.xml'
modnames = ['ACCESS1-0', 'ACCESS1-3',
'BNU-ESM',
'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS',
'CSIRO-Mk3-6-0', 'CanCM4',
'GISS-E2-H-CC', 'GISS-E2-H', 'GISS-E2-R-CC', 'GISS-E2-R',
'HadCM3', 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES',
'IPSL-CM5A-LR',
'MIROC-ESM-CHEM', 'MIROC-ESM', 'MIROC4h', 'MIROC5',
'MPI-ESM-LR', 'MPI-ESM-MR',
'inmcm4'
]
modnames = ['IPSL-CM5A-LR']
# Variables
sstName = 'ts'
tauxName= 'tauu'
#=================================================
# Output
#-------------------------------------------------
outpathdata = '.' # e.g. '/user/directory/output/nc'
outpathjsons = '.' # e.g. '/user/directory/output/json'
outnamejson = 'test.json'
#=================================================
# Output
#-------------------------------------------------
# Metrics
metrics = ['EnsoAmpl', 'EnsoMu']
# Variable name and nino box
ninoBox = 'nino3'
|
Add external driver and parameter file
|
Add external driver and parameter file
|
Python
|
bsd-3-clause
|
eguil/ENSO_metrics,eguil/ENSO_metrics
|
Add external driver and parameter file
|
#=================================================
# Observation
#-------------------------------------------------
sstObsPath = '/clim_obs/obs/ocn/mo/tos/UKMETOFFICE-HadISST-v1-1/130122_HadISST_sst.nc'
tauxObsPath = '/clim_obs/obs/atm/mo/tauu/ERAINT/tauu_ERAINT_198901-200911.nc'
sstNameObs = 'sst'
tauxNameObs = 'tauu'
#=================================================
# Models
#-------------------------------------------------
modpath = '/work/cmip5/historical/atm/mo/VAR/cmip5.MOD.historical.r1i1p1.mo.atm.Amon.VAR.ver-1.latestX.xml'
modnames = ['ACCESS1-0', 'ACCESS1-3',
'BNU-ESM',
'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS',
'CSIRO-Mk3-6-0', 'CanCM4',
'GISS-E2-H-CC', 'GISS-E2-H', 'GISS-E2-R-CC', 'GISS-E2-R',
'HadCM3', 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES',
'IPSL-CM5A-LR',
'MIROC-ESM-CHEM', 'MIROC-ESM', 'MIROC4h', 'MIROC5',
'MPI-ESM-LR', 'MPI-ESM-MR',
'inmcm4'
]
modnames = ['IPSL-CM5A-LR']
# Variables
sstName = 'ts'
tauxName= 'tauu'
#=================================================
# Output
#-------------------------------------------------
outpathdata = '.' # e.g. '/user/directory/output/nc'
outpathjsons = '.' # e.g. '/user/directory/output/json'
outnamejson = 'test.json'
#=================================================
# Output
#-------------------------------------------------
# Metrics
metrics = ['EnsoAmpl', 'EnsoMu']
# Variable name and nino box
ninoBox = 'nino3'
|
<commit_before><commit_msg>Add external driver and parameter file<commit_after>
|
#=================================================
# Observation
#-------------------------------------------------
sstObsPath = '/clim_obs/obs/ocn/mo/tos/UKMETOFFICE-HadISST-v1-1/130122_HadISST_sst.nc'
tauxObsPath = '/clim_obs/obs/atm/mo/tauu/ERAINT/tauu_ERAINT_198901-200911.nc'
sstNameObs = 'sst'
tauxNameObs = 'tauu'
#=================================================
# Models
#-------------------------------------------------
modpath = '/work/cmip5/historical/atm/mo/VAR/cmip5.MOD.historical.r1i1p1.mo.atm.Amon.VAR.ver-1.latestX.xml'
modnames = ['ACCESS1-0', 'ACCESS1-3',
'BNU-ESM',
'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS',
'CSIRO-Mk3-6-0', 'CanCM4',
'GISS-E2-H-CC', 'GISS-E2-H', 'GISS-E2-R-CC', 'GISS-E2-R',
'HadCM3', 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES',
'IPSL-CM5A-LR',
'MIROC-ESM-CHEM', 'MIROC-ESM', 'MIROC4h', 'MIROC5',
'MPI-ESM-LR', 'MPI-ESM-MR',
'inmcm4'
]
modnames = ['IPSL-CM5A-LR']
# Variables
sstName = 'ts'
tauxName= 'tauu'
#=================================================
# Output
#-------------------------------------------------
outpathdata = '.' # e.g. '/user/directory/output/nc'
outpathjsons = '.' # e.g. '/user/directory/output/json'
outnamejson = 'test.json'
#=================================================
# Output
#-------------------------------------------------
# Metrics
metrics = ['EnsoAmpl', 'EnsoMu']
# Variable name and nino box
ninoBox = 'nino3'
|
Add external driver and parameter file#=================================================
# Observation
#-------------------------------------------------
sstObsPath = '/clim_obs/obs/ocn/mo/tos/UKMETOFFICE-HadISST-v1-1/130122_HadISST_sst.nc'
tauxObsPath = '/clim_obs/obs/atm/mo/tauu/ERAINT/tauu_ERAINT_198901-200911.nc'
sstNameObs = 'sst'
tauxNameObs = 'tauu'
#=================================================
# Models
#-------------------------------------------------
modpath = '/work/cmip5/historical/atm/mo/VAR/cmip5.MOD.historical.r1i1p1.mo.atm.Amon.VAR.ver-1.latestX.xml'
modnames = ['ACCESS1-0', 'ACCESS1-3',
'BNU-ESM',
'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS',
'CSIRO-Mk3-6-0', 'CanCM4',
'GISS-E2-H-CC', 'GISS-E2-H', 'GISS-E2-R-CC', 'GISS-E2-R',
'HadCM3', 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES',
'IPSL-CM5A-LR',
'MIROC-ESM-CHEM', 'MIROC-ESM', 'MIROC4h', 'MIROC5',
'MPI-ESM-LR', 'MPI-ESM-MR',
'inmcm4'
]
modnames = ['IPSL-CM5A-LR']
# Variables
sstName = 'ts'
tauxName= 'tauu'
#=================================================
# Output
#-------------------------------------------------
outpathdata = '.' # e.g. '/user/directory/output/nc'
outpathjsons = '.' # e.g. '/user/directory/output/json'
outnamejson = 'test.json'
#=================================================
# Output
#-------------------------------------------------
# Metrics
metrics = ['EnsoAmpl', 'EnsoMu']
# Variable name and nino box
ninoBox = 'nino3'
|
<commit_before><commit_msg>Add external driver and parameter file<commit_after>#=================================================
# Observation
#-------------------------------------------------
sstObsPath = '/clim_obs/obs/ocn/mo/tos/UKMETOFFICE-HadISST-v1-1/130122_HadISST_sst.nc'
tauxObsPath = '/clim_obs/obs/atm/mo/tauu/ERAINT/tauu_ERAINT_198901-200911.nc'
sstNameObs = 'sst'
tauxNameObs = 'tauu'
#=================================================
# Models
#-------------------------------------------------
modpath = '/work/cmip5/historical/atm/mo/VAR/cmip5.MOD.historical.r1i1p1.mo.atm.Amon.VAR.ver-1.latestX.xml'
modnames = ['ACCESS1-0', 'ACCESS1-3',
'BNU-ESM',
'CMCC-CESM', 'CMCC-CM', 'CMCC-CMS',
'CSIRO-Mk3-6-0', 'CanCM4',
'GISS-E2-H-CC', 'GISS-E2-H', 'GISS-E2-R-CC', 'GISS-E2-R',
'HadCM3', 'HadGEM2-AO', 'HadGEM2-CC', 'HadGEM2-ES',
'IPSL-CM5A-LR',
'MIROC-ESM-CHEM', 'MIROC-ESM', 'MIROC4h', 'MIROC5',
'MPI-ESM-LR', 'MPI-ESM-MR',
'inmcm4'
]
modnames = ['IPSL-CM5A-LR']
# Variables
sstName = 'ts'
tauxName= 'tauu'
#=================================================
# Output
#-------------------------------------------------
outpathdata = '.' # e.g. '/user/directory/output/nc'
outpathjsons = '.' # e.g. '/user/directory/output/json'
outnamejson = 'test.json'
#=================================================
# Output
#-------------------------------------------------
# Metrics
metrics = ['EnsoAmpl', 'EnsoMu']
# Variable name and nino box
ninoBox = 'nino3'
|
|
4ff6b846311a0f7bd6cfcf2e661a7c53061406fe
|
glaciercmd/command_vault_info.py
|
glaciercmd/command_vault_info.py
|
import boto
class CommandVaultInfo(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[2])
print "Vault info:\n\tname={}\n\tarn={}\n\tcreation_date={}\n\tlast_inventory_date={}\n\tsize={}\n\tnumber_of_archives={}".format(vault.name, vault.arn, vault.creation_date, vault.last_inventory_date, vault.size, vault.number_of_archives)
except:
print "Vaule named '{}' does not exist.".format(args[2])
def accept(self, args):
return len(args) >= 3 and args[0] == 'vault' and args[1] == 'info'
def command_init():
return CommandVaultInfo()
|
Add command to print vault info
|
Add command to print vault info
|
Python
|
mit
|
carsonmcdonald/glacier-cmd
|
Add command to print vault info
|
import boto
class CommandVaultInfo(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[2])
print "Vault info:\n\tname={}\n\tarn={}\n\tcreation_date={}\n\tlast_inventory_date={}\n\tsize={}\n\tnumber_of_archives={}".format(vault.name, vault.arn, vault.creation_date, vault.last_inventory_date, vault.size, vault.number_of_archives)
except:
print "Vaule named '{}' does not exist.".format(args[2])
def accept(self, args):
return len(args) >= 3 and args[0] == 'vault' and args[1] == 'info'
def command_init():
return CommandVaultInfo()
|
<commit_before><commit_msg>Add command to print vault info<commit_after>
|
import boto
class CommandVaultInfo(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[2])
print "Vault info:\n\tname={}\n\tarn={}\n\tcreation_date={}\n\tlast_inventory_date={}\n\tsize={}\n\tnumber_of_archives={}".format(vault.name, vault.arn, vault.creation_date, vault.last_inventory_date, vault.size, vault.number_of_archives)
except:
print "Vaule named '{}' does not exist.".format(args[2])
def accept(self, args):
return len(args) >= 3 and args[0] == 'vault' and args[1] == 'info'
def command_init():
return CommandVaultInfo()
|
Add command to print vault infoimport boto
class CommandVaultInfo(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[2])
print "Vault info:\n\tname={}\n\tarn={}\n\tcreation_date={}\n\tlast_inventory_date={}\n\tsize={}\n\tnumber_of_archives={}".format(vault.name, vault.arn, vault.creation_date, vault.last_inventory_date, vault.size, vault.number_of_archives)
except:
print "Vaule named '{}' does not exist.".format(args[2])
def accept(self, args):
return len(args) >= 3 and args[0] == 'vault' and args[1] == 'info'
def command_init():
return CommandVaultInfo()
|
<commit_before><commit_msg>Add command to print vault info<commit_after>import boto
class CommandVaultInfo(object):
def execute(self, args, config):
glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))
try:
vault = glacier_connection.get_vault(args[2])
print "Vault info:\n\tname={}\n\tarn={}\n\tcreation_date={}\n\tlast_inventory_date={}\n\tsize={}\n\tnumber_of_archives={}".format(vault.name, vault.arn, vault.creation_date, vault.last_inventory_date, vault.size, vault.number_of_archives)
except:
print "Vaule named '{}' does not exist.".format(args[2])
def accept(self, args):
return len(args) >= 3 and args[0] == 'vault' and args[1] == 'info'
def command_init():
return CommandVaultInfo()
|
|
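A note on error handling in the vault info command above: the bare except swallows every failure, including credential and network problems. Below is a narrower sketch; the exception class is an assumption about boto 2's Glacier layer, not something confirmed by this record.

from boto.glacier.exceptions import UnexpectedHTTPResponseError


def print_vault_info(glacier_connection, vault_name):
    # Treat only the "vault not found / request rejected" case as expected.
    try:
        vault = glacier_connection.get_vault(vault_name)
    except UnexpectedHTTPResponseError:
        print "Vault named '{}' does not exist.".format(vault_name)
    else:
        print "Vault info: name={} arn={}".format(vault.name, vault.arn)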
582ebd448508625ed2c9f362aaafc3fc46e60df0
|
functest/tests/unit/features/test_security_scan.py
|
functest/tests/unit/features/test_security_scan.py
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import security_scan
from functest.utils import constants
class SecurityScanTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.sscan = security_scan.SecurityScan()
def test_init(self):
self.assertEqual(self.sscan.project_name, "securityscanning")
self.assertEqual(self.sscan.case_name, "security_scan")
self.assertEqual(
self.sscan.repo,
constants.CONST.__getattribute__("dir_repo_securityscan"))
self.assertEqual(
self.sscan.cmd, (
'. {0}/stackrc && cd {1} && '
'python security_scan.py --config config.ini && '
'cd -'.format(
constants.CONST.__getattribute__("dir_functest_conf"),
self.sscan.repo)))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for security_scan
|
Add unit tests for security_scan
Change-Id: Idda799c01408aa9afd09d573e23f42e011f3fafb
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
Python
|
apache-2.0
|
opnfv/functest,opnfv/functest,mywulin/functest,mywulin/functest
|
Add unit tests for security_scan
Change-Id: Idda799c01408aa9afd09d573e23f42e011f3fafb
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import security_scan
from functest.utils import constants
class SecurityScanTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.sscan = security_scan.SecurityScan()
def test_init(self):
self.assertEqual(self.sscan.project_name, "securityscanning")
self.assertEqual(self.sscan.case_name, "security_scan")
self.assertEqual(
self.sscan.repo,
constants.CONST.__getattribute__("dir_repo_securityscan"))
self.assertEqual(
self.sscan.cmd, (
'. {0}/stackrc && cd {1} && '
'python security_scan.py --config config.ini && '
'cd -'.format(
constants.CONST.__getattribute__("dir_functest_conf"),
self.sscan.repo)))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for security_scan
Change-Id: Idda799c01408aa9afd09d573e23f42e011f3fafb
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import security_scan
from functest.utils import constants
class SecurityScanTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.sscan = security_scan.SecurityScan()
def test_init(self):
self.assertEqual(self.sscan.project_name, "securityscanning")
self.assertEqual(self.sscan.case_name, "security_scan")
self.assertEqual(
self.sscan.repo,
constants.CONST.__getattribute__("dir_repo_securityscan"))
self.assertEqual(
self.sscan.cmd, (
'. {0}/stackrc && cd {1} && '
'python security_scan.py --config config.ini && '
'cd -'.format(
constants.CONST.__getattribute__("dir_functest_conf"),
self.sscan.repo)))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for security_scan
Change-Id: Idda799c01408aa9afd09d573e23f42e011f3fafb
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import security_scan
from functest.utils import constants
class SecurityScanTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.sscan = security_scan.SecurityScan()
def test_init(self):
self.assertEqual(self.sscan.project_name, "securityscanning")
self.assertEqual(self.sscan.case_name, "security_scan")
self.assertEqual(
self.sscan.repo,
constants.CONST.__getattribute__("dir_repo_securityscan"))
self.assertEqual(
self.sscan.cmd, (
'. {0}/stackrc && cd {1} && '
'python security_scan.py --config config.ini && '
'cd -'.format(
constants.CONST.__getattribute__("dir_functest_conf"),
self.sscan.repo)))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for security_scan
Change-Id: Idda799c01408aa9afd09d573e23f42e011f3fafb
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import security_scan
from functest.utils import constants
class SecurityScanTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.sscan = security_scan.SecurityScan()
def test_init(self):
self.assertEqual(self.sscan.project_name, "securityscanning")
self.assertEqual(self.sscan.case_name, "security_scan")
self.assertEqual(
self.sscan.repo,
constants.CONST.__getattribute__("dir_repo_securityscan"))
self.assertEqual(
self.sscan.cmd, (
'. {0}/stackrc && cd {1} && '
'python security_scan.py --config config.ini && '
'cd -'.format(
constants.CONST.__getattribute__("dir_functest_conf"),
self.sscan.repo)))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
|
2ca07d4a8893196bbf304bcdac16688505e6123a
|
shopify/webhooks/management/commands/webhookregister.py
|
shopify/webhooks/management/commands/webhookregister.py
|
from django.core.management.base import NoArgsCommand
from webhooks.models import Webhook
class Command(NoArgsCommand):
help = 'Register all created Shopify webhooks'
def handle_noargs(self, **options):
Webhook.objects.register()
|
Add a management command to register webhooks
|
Add a management command to register webhooks
|
Python
|
bsd-3-clause
|
CorbanU/corban-shopify,CorbanU/corban-shopify
|
Add a management command to register webhooks
|
from django.core.management.base import NoArgsCommand
from webhooks.models import Webhook
class Command(NoArgsCommand):
help = 'Register all created Shopify webhooks'
def handle_noargs(self, **options):
Webhook.objects.register()
|
<commit_before><commit_msg>Add a management command to register webhooks<commit_after>
|
from django.core.management.base import NoArgsCommand
from webhooks.models import Webhook
class Command(NoArgsCommand):
help = 'Register all created Shopify webhooks'
def handle_noargs(self, **options):
Webhook.objects.register()
|
Add a management command to register webhooksfrom django.core.management.base import NoArgsCommand
from webhooks.models import Webhook
class Command(NoArgsCommand):
help = 'Register all created Shopify webhooks'
def handle_noargs(self, **options):
Webhook.objects.register()
|
<commit_before><commit_msg>Add a management command to register webhooks<commit_after>from django.core.management.base import NoArgsCommand
from webhooks.models import Webhook
class Command(NoArgsCommand):
help = 'Register all created Shopify webhooks'
def handle_noargs(self, **options):
Webhook.objects.register()
|
|
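For context on the command above: NoArgsCommand was deprecated in Django 1.8 and removed in 1.10, so on newer Django versions the same command is written against BaseCommand. A sketch, assuming the same Webhook.objects.register() manager method:

from django.core.management.base import BaseCommand

from webhooks.models import Webhook


class Command(BaseCommand):
    help = 'Register all created Shopify webhooks'

    def handle(self, *args, **options):
        # handle() replaces handle_noargs() for commands that take no arguments.
        Webhook.objects.register()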
ee859881af0633d4d2d88015c907cfa856516dbe
|
lintcode/000-000-Two-Sum-II/TwoSumII.py
|
lintcode/000-000-Two-Sum-II/TwoSumII.py
|
class Solution:
# @param nums, an array of integer
# @param target, an integer
# @return an integer
def twoSum2(self, nums, target):
# Write your code here
nums.sort()
i, j = 0, len(nums) - 1
res = 0
while i < j:
if nums[i] + nums[j] <= target:
i += 1
else:
res += j - i
j -= 1
return res
|
Create TwoSum II for Lint
|
Create TwoSum II for Lint
|
Python
|
mit
|
Chasego/codi,Chasego/codi,cc13ny/Allin,cc13ny/algo,Chasego/cod,cc13ny/algo,Chasego/codirit,Chasego/cod,cc13ny/Allin,Chasego/codirit,Chasego/codi,Chasego/codirit,cc13ny/Allin,Chasego/codi,Chasego/codi,Chasego/codirit,cc13ny/algo,Chasego/cod,cc13ny/Allin,cc13ny/algo,Chasego/codirit,cc13ny/algo,cc13ny/Allin,Chasego/cod,Chasego/cod
|
Create TwoSum II for Lint
|
class Solution:
# @param nums, an array of integer
# @param target, an integer
# @return an integer
def twoSum2(self, nums, target):
# Write your code here
nums.sort()
i, j = 0, len(nums) - 1
res = 0
while i < j:
if nums[i] + nums[j] <= target:
i += 1
else:
res += j - i
j -= 1
return res
|
<commit_before><commit_msg>Create TwoSum II for Lint<commit_after>
|
class Solution:
# @param nums, an array of integer
# @param target, an integer
# @return an integer
def twoSum2(self, nums, target):
# Write your code here
nums.sort()
i, j = 0, len(nums) - 1
res = 0
while i < j:
if nums[i] + nums[j] <= target:
i += 1
else:
res += j - i
j -= 1
return res
|
Create TwoSum II for Lintclass Solution:
# @param nums, an array of integer
# @param target, an integer
# @return an integer
def twoSum2(self, nums, target):
# Write your code here
nums.sort()
i, j = 0, len(nums) - 1
res = 0
while i < j:
if nums[i] + nums[j] <= target:
i += 1
else:
res += j - i
j -= 1
return res
|
<commit_before><commit_msg>Create TwoSum II for Lint<commit_after>class Solution:
# @param nums, an array of integer
# @param target, an integer
# @return an integer
def twoSum2(self, nums, target):
# Write your code here
nums.sort()
i, j = 0, len(nums) - 1
res = 0
while i < j:
if nums[i] + nums[j] <= target:
i += 1
else:
res += j - i
j -= 1
return res
|
|
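A quick usage check for the solution above, which counts index pairs whose sum is strictly greater than the target; it assumes the Solution class from this record.

solution = Solution()
# Only (11, 15) exceeds 24, so exactly one pair is expected.
assert solution.twoSum2([2, 7, 11, 15], 24) == 1
# All three pairs of ones sum to 2 > 1.
assert solution.twoSum2([1, 1, 1], 1) == 3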
52e8a378d8a31989c9d93ef83eabbe6df339f915
|
src/waldur_mastermind/marketplace/migrations/0083_offering_component.py
|
src/waldur_mastermind/marketplace/migrations/0083_offering_component.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from waldur_mastermind.marketplace_openstack import STORAGE_TYPE, RAM_TYPE, CORES_TYPE, PACKAGE_TYPE
def create_category_components(apps, schema_editor):
CATEGORY_TITLE = 'Private clouds'
Category = apps.get_model('marketplace', 'Category')
CategoryComponent = apps.get_model('marketplace', 'CategoryComponent')
OfferingComponent = apps.get_model('marketplace', 'OfferingComponent')
try:
vpc_category = Category.objects.get(title=CATEGORY_TITLE)
except Category.DoesNotExist:
return
storage_gb_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=STORAGE_TYPE,
name='Storage',
measured_unit='GB'
)
ram_gb_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=RAM_TYPE,
name='RAM',
measured_unit='GB'
)
cores_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=CORES_TYPE,
name='Cores',
measured_unit='cores'
)
components = OfferingComponent.objects.filter(offering__type=PACKAGE_TYPE, parent=None)
components.filter(type=STORAGE_TYPE).update(parent=storage_gb_cc)
components.filter(type=RAM_TYPE).update(parent=ram_gb_cc)
components.filter(type=CORES_TYPE).update(parent=cores_cc)
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0082_orderitem_activated'),
]
operations = [
migrations.RunPython(create_category_components),
]
|
Add data migration to add category components for VPC.
|
Add data migration to add category components for VPC.
|
Python
|
mit
|
opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/nodeconductor-assembly-waldur
|
Add data migration to add category components for VPC.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from waldur_mastermind.marketplace_openstack import STORAGE_TYPE, RAM_TYPE, CORES_TYPE, PACKAGE_TYPE
def create_category_components(apps, schema_editor):
CATEGORY_TITLE = 'Private clouds'
Category = apps.get_model('marketplace', 'Category')
CategoryComponent = apps.get_model('marketplace', 'CategoryComponent')
OfferingComponent = apps.get_model('marketplace', 'OfferingComponent')
try:
vpc_category = Category.objects.get(title=CATEGORY_TITLE)
except Category.DoesNotExist:
return
storage_gb_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=STORAGE_TYPE,
name='Storage',
measured_unit='GB'
)
ram_gb_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=RAM_TYPE,
name='RAM',
measured_unit='GB'
)
cores_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=CORES_TYPE,
name='Cores',
measured_unit='cores'
)
components = OfferingComponent.objects.filter(offering__type=PACKAGE_TYPE, parent=None)
components.filter(type=STORAGE_TYPE).update(parent=storage_gb_cc)
components.filter(type=RAM_TYPE).update(parent=ram_gb_cc)
components.filter(type=CORES_TYPE).update(parent=cores_cc)
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0082_orderitem_activated'),
]
operations = [
migrations.RunPython(create_category_components),
]
|
<commit_before><commit_msg>Add data migration to add category components for VPC.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from waldur_mastermind.marketplace_openstack import STORAGE_TYPE, RAM_TYPE, CORES_TYPE, PACKAGE_TYPE
def create_category_components(apps, schema_editor):
CATEGORY_TITLE = 'Private clouds'
Category = apps.get_model('marketplace', 'Category')
CategoryComponent = apps.get_model('marketplace', 'CategoryComponent')
OfferingComponent = apps.get_model('marketplace', 'OfferingComponent')
try:
vpc_category = Category.objects.get(title=CATEGORY_TITLE)
except Category.DoesNotExist:
return
storage_gb_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=STORAGE_TYPE,
name='Storage',
measured_unit='GB'
)
ram_gb_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=RAM_TYPE,
name='RAM',
measured_unit='GB'
)
cores_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=CORES_TYPE,
name='Cores',
measured_unit='cores'
)
components = OfferingComponent.objects.filter(offering__type=PACKAGE_TYPE, parent=None)
components.filter(type=STORAGE_TYPE).update(parent=storage_gb_cc)
components.filter(type=RAM_TYPE).update(parent=ram_gb_cc)
components.filter(type=CORES_TYPE).update(parent=cores_cc)
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0082_orderitem_activated'),
]
operations = [
migrations.RunPython(create_category_components),
]
|
Add data migration to add category components for VPC.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from waldur_mastermind.marketplace_openstack import STORAGE_TYPE, RAM_TYPE, CORES_TYPE, PACKAGE_TYPE
def create_category_components(apps, schema_editor):
CATEGORY_TITLE = 'Private clouds'
Category = apps.get_model('marketplace', 'Category')
CategoryComponent = apps.get_model('marketplace', 'CategoryComponent')
OfferingComponent = apps.get_model('marketplace', 'OfferingComponent')
try:
vpc_category = Category.objects.get(title=CATEGORY_TITLE)
except Category.DoesNotExist:
return
storage_gb_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=STORAGE_TYPE,
name='Storage',
measured_unit='GB'
)
ram_gb_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=RAM_TYPE,
name='RAM',
measured_unit='GB'
)
cores_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=CORES_TYPE,
name='Cores',
measured_unit='cores'
)
components = OfferingComponent.objects.filter(offering__type=PACKAGE_TYPE, parent=None)
components.filter(type=STORAGE_TYPE).update(parent=storage_gb_cc)
components.filter(type=RAM_TYPE).update(parent=ram_gb_cc)
components.filter(type=CORES_TYPE).update(parent=cores_cc)
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0082_orderitem_activated'),
]
operations = [
migrations.RunPython(create_category_components),
]
|
<commit_before><commit_msg>Add data migration to add category components for VPC.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from waldur_mastermind.marketplace_openstack import STORAGE_TYPE, RAM_TYPE, CORES_TYPE, PACKAGE_TYPE
def create_category_components(apps, schema_editor):
CATEGORY_TITLE = 'Private clouds'
Category = apps.get_model('marketplace', 'Category')
CategoryComponent = apps.get_model('marketplace', 'CategoryComponent')
OfferingComponent = apps.get_model('marketplace', 'OfferingComponent')
try:
vpc_category = Category.objects.get(title=CATEGORY_TITLE)
except Category.DoesNotExist:
return
storage_gb_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=STORAGE_TYPE,
name='Storage',
measured_unit='GB'
)
ram_gb_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=RAM_TYPE,
name='RAM',
measured_unit='GB'
)
cores_cc, _ = CategoryComponent.objects.get_or_create(
category=vpc_category,
type=CORES_TYPE,
name='Cores',
measured_unit='cores'
)
components = OfferingComponent.objects.filter(offering__type=PACKAGE_TYPE, parent=None)
components.filter(type=STORAGE_TYPE).update(parent=storage_gb_cc)
components.filter(type=RAM_TYPE).update(parent=ram_gb_cc)
components.filter(type=CORES_TYPE).update(parent=cores_cc)
class Migration(migrations.Migration):
dependencies = [
('marketplace', '0082_orderitem_activated'),
]
operations = [
migrations.RunPython(create_category_components),
]
|
|
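An optional refinement to the migration above: as written, RunPython has no reverse step, so the migration cannot be unapplied. If a no-op reverse is acceptable, Django ships one; a sketch of the operations list with it:

operations = [
    # RunPython.noop lets the data migration be rolled back as a no-op.
    migrations.RunPython(create_category_components, migrations.RunPython.noop),
]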
1c81643eaed91b4171a4e68699d930e5ef3688db
|
senlin/tests/tempest/api/policies/test_policy_validate_negative.py
|
senlin/tests/tempest/api/policies/test_policy_validate_negative.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestPolicyValidateNegativeBadRequest(base.BaseSenlinAPITest):
@test.attr(type=['negative'])
@decorators.idempotent_id('4b55bb3e-12d6-4728-9b53-9db5094ac8b5')
def test_policy_validate_with_empty_body(self):
params = {
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('a1c35d93-2d19-4a72-919f-cfd70f5cbf06')
def test_policy_validate_no_spec(self):
params = {
'policy': {
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('6073da36-ee3e-4925-bce1-6c9a158e710d')
def test_policy_validate_policy_type_incorrect(self):
spec = copy.deepcopy(constants.spec_scaling_policy)
spec['type'] = 'senlin.policy.bogus'
params = {
'policy': {
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('1e1833ea-4a67-4ac1-b6e2-f9afff51c945')
def test_policy_validate_spec_validation_failed(self):
spec = copy.deepcopy(constants.spec_scaling_policy)
spec['properties']['bogus'] = 'foo'
params = {
'policy': {
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
|
Add negative API tests for policy validation
|
Add negative API tests for policy validation
Add negative API tests for policy validation
Change-Id: I0363bfd2165893b713059dc9db5ab9e632522091
|
Python
|
apache-2.0
|
openstack/senlin,openstack/senlin,stackforge/senlin,stackforge/senlin,openstack/senlin
|
Add negative API tests for policy validation
Add negative API tests for policy validation
Change-Id: I0363bfd2165893b713059dc9db5ab9e632522091
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestPolicyValidateNegativeBadRequest(base.BaseSenlinAPITest):
@test.attr(type=['negative'])
@decorators.idempotent_id('4b55bb3e-12d6-4728-9b53-9db5094ac8b5')
def test_policy_validate_with_empty_body(self):
params = {
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('a1c35d93-2d19-4a72-919f-cfd70f5cbf06')
def test_policy_validate_no_spec(self):
params = {
'policy': {
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('6073da36-ee3e-4925-bce1-6c9a158e710d')
def test_policy_validate_policy_type_incorrect(self):
spec = copy.deepcopy(constants.spec_scaling_policy)
spec['type'] = 'senlin.policy.bogus'
params = {
'policy': {
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('1e1833ea-4a67-4ac1-b6e2-f9afff51c945')
def test_policy_validate_spec_validation_failed(self):
spec = copy.deepcopy(constants.spec_scaling_policy)
spec['properties']['bogus'] = 'foo'
params = {
'policy': {
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
|
<commit_before><commit_msg>Add negative API tests for policy validation
Add negative API tests for policy validation
Change-Id: I0363bfd2165893b713059dc9db5ab9e632522091<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestPolicyValidateNegativeBadRequest(base.BaseSenlinAPITest):
@test.attr(type=['negative'])
@decorators.idempotent_id('4b55bb3e-12d6-4728-9b53-9db5094ac8b5')
def test_policy_validate_with_empty_body(self):
params = {
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('a1c35d93-2d19-4a72-919f-cfd70f5cbf06')
def test_policy_validate_no_spec(self):
params = {
'policy': {
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('6073da36-ee3e-4925-bce1-6c9a158e710d')
def test_policy_validate_policy_type_incorrect(self):
spec = copy.deepcopy(constants.spec_scaling_policy)
spec['type'] = 'senlin.policy.bogus'
params = {
'policy': {
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('1e1833ea-4a67-4ac1-b6e2-f9afff51c945')
def test_policy_validate_spec_validation_failed(self):
spec = copy.deepcopy(constants.spec_scaling_policy)
spec['properties']['bogus'] = 'foo'
params = {
'policy': {
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
|
Add negative API tests for policy validation
Add negative API tests for policy validation
Change-Id: I0363bfd2165893b713059dc9db5ab9e632522091# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestPolicyValidateNegativeBadRequest(base.BaseSenlinAPITest):
@test.attr(type=['negative'])
@decorators.idempotent_id('4b55bb3e-12d6-4728-9b53-9db5094ac8b5')
def test_policy_validate_with_empty_body(self):
params = {
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('a1c35d93-2d19-4a72-919f-cfd70f5cbf06')
def test_policy_validate_no_spec(self):
params = {
'policy': {
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('6073da36-ee3e-4925-bce1-6c9a158e710d')
def test_policy_validate_policy_type_incorrect(self):
spec = copy.deepcopy(constants.spec_scaling_policy)
spec['type'] = 'senlin.policy.bogus'
params = {
'policy': {
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('1e1833ea-4a67-4ac1-b6e2-f9afff51c945')
def test_policy_validate_spec_validation_failed(self):
spec = copy.deepcopy(constants.spec_scaling_policy)
spec['properties']['bogus'] = 'foo'
params = {
'policy': {
'spec': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
|
<commit_before><commit_msg>Add negative API tests for policy validation
Add negative API tests for policy validation
Change-Id: I0363bfd2165893b713059dc9db5ab9e632522091<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestPolicyValidateNegativeBadRequest(base.BaseSenlinAPITest):
@test.attr(type=['negative'])
@decorators.idempotent_id('4b55bb3e-12d6-4728-9b53-9db5094ac8b5')
def test_policy_validate_with_empty_body(self):
params = {
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('a1c35d93-2d19-4a72-919f-cfd70f5cbf06')
def test_policy_validate_no_spec(self):
params = {
'policy': {
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('6073da36-ee3e-4925-bce1-6c9a158e710d')
def test_policy_validate_policy_type_incorrect(self):
spec = copy.deepcopy(constants.spec_scaling_policy)
spec['type'] = 'senlin.policy.bogus'
params = {
'policy': {
'spce': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
@test.attr(type=['negative'])
@decorators.idempotent_id('1e1833ea-4a67-4ac1-b6e2-f9afff51c945')
def test_policy_validate_spec_validation_failed(self):
spec = copy.deepcopy(constants.spec_scaling_policy)
spec['properties']['bogus'] = 'foo'
params = {
'policy': {
'spce': spec
}
}
# Verify badrequest exception(400) is raised.
self.assertRaises(exceptions.BadRequest,
self.client.validate_obj,
'policies', params)
|
|
f040351dd3397ba7297b69b2468b2b37589c0d8f
|
games/management/commands/get_installer_urls.py
|
games/management/commands/get_installer_urls.py
|
import json
from collections import defaultdict
from django.core.management.base import BaseCommand
from common.util import load_yaml
from games import models
class Command(BaseCommand):
def handle(self, *args, **kwargs):
self.stdout.write("Installer stats\n")
installers = models.Installer.objects.all()
url_stats = defaultdict(list)
for installer in installers:
slug = installer.slug
installer_content = load_yaml(installer.content)
try:
files = installer_content.get("files", [])
except AttributeError:
print("Deleting installer %s" % installer)
installer.delete()
continue
if files is None:
print("Deleting installer %s" % installer)
installer.delete()
continue
for url_dict in files:
fileid = next(iter(url_dict))
try:
url = url_dict[fileid]
except TypeError:
print("Deleting installer %s" % installer)
installer.delete()
continue
if isinstance(url, str):
if url.startswith("N/A"):
continue
url_stats[url].append(slug)
elif isinstance(url, dict):
if url["url"].startswith("N/A"):
continue
url_stats[url["url"]].append(slug)
with open("installer-files.json", "w") as installer_files:
json.dump(url_stats, installer_files, indent=2)
|
Add task to get stats about files
|
Add task to get stats about files
|
Python
|
agpl-3.0
|
lutris/website,lutris/website,lutris/website,lutris/website
|
Add task to get stats about files
|
import json
from collections import defaultdict
from django.core.management.base import BaseCommand
from common.util import load_yaml
from games import models
class Command(BaseCommand):
def handle(self, *args, **kwargs):
self.stdout.write("Installer stats\n")
installers = models.Installer.objects.all()
url_stats = defaultdict(list)
for installer in installers:
slug = installer.slug
installer_content = load_yaml(installer.content)
try:
files = installer_content.get("files", [])
except AttributeError:
print("Deleting installer %s" % installer)
installer.delete()
continue
if files is None:
print("Deleting installer %s" % installer)
installer.delete()
continue
for url_dict in files:
fileid = next(iter(url_dict))
try:
url = url_dict[fileid]
except TypeError:
print("Deleting installer %s" % installer)
installer.delete()
continue
if isinstance(url, str):
if url.startswith("N/A"):
continue
url_stats[url].append(slug)
elif isinstance(url, dict):
if url["url"].startswith("N/A"):
continue
url_stats[url["url"]].append(slug)
with open("installer-files.json", "w") as installer_files:
json.dump(url_stats, installer_files, indent=2)
|
<commit_before><commit_msg>Add task to get stats about files<commit_after>
|
import json
from collections import defaultdict
from django.core.management.base import BaseCommand
from common.util import load_yaml
from games import models
class Command(BaseCommand):
def handle(self, *args, **kwargs):
self.stdout.write("Installer stats\n")
installers = models.Installer.objects.all()
url_stats = defaultdict(list)
for installer in installers:
slug = installer.slug
installer_content = load_yaml(installer.content)
try:
files = installer_content.get("files", [])
except AttributeError:
print("Deleting installer %s" % installer)
installer.delete()
continue
if files is None:
print("Deleting installer %s" % installer)
installer.delete()
continue
for url_dict in files:
fileid = next(iter(url_dict))
try:
url = url_dict[fileid]
except TypeError:
print("Deleting installer %s" % installer)
installer.delete()
continue
if isinstance(url, str):
if url.startswith("N/A"):
continue
url_stats[url].append(slug)
elif isinstance(url, dict):
if url["url"].startswith("N/A"):
continue
url_stats[url["url"]].append(slug)
with open("installer-files.json", "w") as installer_files:
json.dump(url_stats, installer_files, indent=2)
|
Add task to get stats about files
import json
from collections import defaultdict
from django.core.management.base import BaseCommand
from common.util import load_yaml
from games import models
class Command(BaseCommand):
def handle(self, *args, **kwargs):
self.stdout.write("Installer stats\n")
installers = models.Installer.objects.all()
url_stats = defaultdict(list)
for installer in installers:
slug = installer.slug
installer_content = load_yaml(installer.content)
try:
files = installer_content.get("files", [])
except AttributeError:
print("Deleting installer %s" % installer)
installer.delete()
continue
if files is None:
print("Deleting installer %s" % installer)
installer.delete()
continue
for url_dict in files:
fileid = next(iter(url_dict))
try:
url = url_dict[fileid]
except TypeError:
print("Deleting installer %s" % installer)
installer.delete()
continue
if isinstance(url, str):
if url.startswith("N/A"):
continue
url_stats[url].append(slug)
elif isinstance(url, dict):
if url["url"].startswith("N/A"):
continue
url_stats[url["url"]].append(slug)
with open("installer-files.json", "w") as installer_files:
json.dump(url_stats, installer_files, indent=2)
|
<commit_before><commit_msg>Add task to get stats about files<commit_after>import json
from collections import defaultdict
from django.core.management.base import BaseCommand
from common.util import load_yaml
from games import models
class Command(BaseCommand):
def handle(self, *args, **kwargs):
self.stdout.write("Installer stats\n")
installers = models.Installer.objects.all()
url_stats = defaultdict(list)
for installer in installers:
slug = installer.slug
installer_content = load_yaml(installer.content)
try:
files = installer_content.get("files", [])
except AttributeError:
print("Deleting installer %s" % installer)
installer.delete()
continue
if files is None:
print("Deleting installer %s" % installer)
installer.delete()
continue
for url_dict in files:
fileid = next(iter(url_dict))
try:
url = url_dict[fileid]
except TypeError:
print("Deleting installer %s" % installer)
installer.delete()
continue
if isinstance(url, str):
if url.startswith("N/A"):
continue
url_stats[url].append(slug)
elif isinstance(url, dict):
if url["url"].startswith("N/A"):
continue
url_stats[url["url"]].append(slug)
with open("installer-files.json", "w") as installer_files:
json.dump(url_stats, installer_files, indent=2)
|
|
463502a251111199da130e508929a35b2f126f4e
|
bookmarks/models.py
|
bookmarks/models.py
|
from sqlalchemy import Column, Integer, String
from bookmarks.database import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String(50), unique=True, nullable=False)
name = Column(String(120))
email = Column(String(256), unique=True, nullable=False)
def __init__(self, name=None, username=None, email=None):
self.username = username
self.name = name
self.email = email
def __repr__(self):
return '<User %r>' % (self.name)
|
Add columns to User model
|
Add columns to User model
|
Python
|
apache-2.0
|
byanofsky/bookmarks,byanofsky/bookmarks,byanofsky/bookmarks
|
Add columns to User model
|
from sqlalchemy import Column, Integer, String
from bookmarks.database import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String(50), unique=True, nullable=False)
name = Column(String(120))
email = Column(String(256), unique=True, nullable=False)
def __init__(self, name=None, username=None, email=None):
self.username = username
self.name = name
self.email = email
def __repr__(self):
return '<User %r>' % (self.name)
|
<commit_before><commit_msg>Add columns to User model<commit_after>
|
from sqlalchemy import Column, Integer, String
from bookmarks.database import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String(50), unique=True, nullable=False)
name = Column(String(120))
email = Column(String(256), unique=True, nullable=False)
def __init__(self, name=None, username=None, email=None):
self.username = username
self.name = name
self.email = email
def __repr__(self):
return '<User %r>' % (self.name)
|
Add columns to User model
from sqlalchemy import Column, Integer, String
from bookmarks.database import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String(50), unique=True, nullable=False)
name = Column(String(120))
email = Column(String(256), unique=True, nullable=False)
def __init__(self, name=None, username=None, email=None):
self.username = username
self.name = name
self.email = email
def __repr__(self):
return '<User %r>' % (self.name)
|
<commit_before><commit_msg>Add columns to User model<commit_after>from sqlalchemy import Column, Integer, String
from bookmarks.database import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(String(50), unique=True, nullable=False)
name = Column(String(120))
email = Column(String(256), unique=True, nullable=False)
def __init__(self, name=None, username=None, email=None):
self.username = username
self.name = name
self.email = email
def __repr__(self):
return '<User %r>' % (self.name)
|
|
bb11ab050fe9a7bb0ffe83419eb0e87390f7deac
|
hopsutil/tensorboard.py
|
hopsutil/tensorboard.py
|
"""
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitates development by hiding complexity for programs interacting with Hops services.
"""
import socket
import subprocess
import os
import hdfs
def register(logdir):
#find free port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('',0))
addr, port = s.getsockname()
s.close()
#let tb bind to port
subprocess.Popen([os.getenv("PYSPARK_PYTHON"), "tensorboard", "--logdir=%s"%logdir, "--port=%d"%port, "--debug"])
tb_url = "http://{0}:{1}".format(addr, port)
#dump tb host:port to hdfs
hops_user = os.environ["USER"];
hops_user_split = hops_user.split("__");
project = hops_user_split[0];
hdfs_handle = hdfs.get()
hdfs_handle.dump(tb_url, "hdfs:///Projects/" + project + "/Resources/.jupyter.tensorboard", user=hops_user)
|
Add registration method for TB
|
Add registration method for TB
|
Python
|
apache-2.0
|
hopshadoop/hops-util-py,hopshadoop/hops-util-py
|
Add registration method for TB
|
"""
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitates development by hiding complexity for programs interacting with Hops services.
"""
import socket
import subprocess
import os
import hdfs
def register(logdir):
#find free port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('',0))
addr, port = s.getsockname()
s.close()
#let tb bind to port
subprocess.Popen([os.getenv("PYSPARK_PYTHON"), "tensorboard", "--logdir=%s"%logdir, "--port=%d"%port, "--debug"])
tb_url = "http://{0}:{1}".format(addr, port)
#dump tb host:port to hdfs
hops_user = os.environ["USER"];
hops_user_split = hops_user.split("__");
project = hops_user_split[0];
hdfs_handle = hdfs.get()
hdfs_handle.dump(tb_url, "hdfs:///Projects/" + project + "/Resources/.jupyter.tensorboard", user=hops_user)
|
<commit_before><commit_msg>Add registration method for TB<commit_after>
|
"""
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitates development by hiding complexity for programs interacting with Hops services.
"""
import socket
import subprocess
import os
import hdfs
def register(logdir):
#find free port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('',0))
addr, port = s.getsockname()
s.close()
#let tb bind to port
subprocess.Popen([os.getenv("PYSPARK_PYTHON"), "tensorboard", "--logdir=%s"%logdir, "--port=%d"%port, "--debug"])
tb_url = "http://{0}:{1}".format(addr, port)
#dump tb host:port to hdfs
hops_user = os.environ["USER"];
hops_user_split = hops_user.split("__");
project = hops_user_split[0];
hdfs_handle = hdfs.get()
hdfs_handle.dump(tb_url, "hdfs:///Projects/" + project + "/Resources/.jupyter.tensorboard", user=hops_user)
|
Add registration method for TB
"""
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitates development by hiding complexity for programs interacting with Hops services.
"""
import socket
import subprocess
import os
import hdfs
def register(logdir):
#find free port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('',0))
addr, port = s.getsockname()
s.close()
#let tb bind to port
subprocess.Popen([os.getenv("PYSPARK_PYTHON"), "tensorboard", "--logdir=%s"%logdir, "--port=%d"%port, "--debug"])
tb_url = "http://{0}:{1}".format(addr, port)
#dump tb host:port to hdfs
hops_user = os.environ["USER"];
hops_user_split = hops_user.split("__");
project = hops_user_split[0];
hdfs_handle = hdfs.get()
hdfs_handle.dump(tb_url, "hdfs:///Projects/" + project + "/Resources/.jupyter.tensorboard", user=hops_user)
|
<commit_before><commit_msg>Add registration method for TB<commit_after>"""
Utility functions to retrieve information about available services and setting up security for the Hops platform.
These utils facilitates development by hiding complexity for programs interacting with Hops services.
"""
import socket
import subprocess
import os
import hdfs
def register(logdir):
#find free port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('',0))
addr, port = s.getsockname()
s.close()
#let tb bind to port
subprocess.Popen([os.getenv("PYSPARK_PYTHON"), "tensorboard", "--logdir=%s"%logdir, "--port=%d"%port, "--debug"])
tb_url = "http://{0}:{1}".format(addr, port)
#dump tb host:port to hdfs
hops_user = os.environ["USER"];
hops_user_split = hops_user.split("__");
project = hops_user_split[0];
hdfs_handle = hdfs.get()
hdfs_handle.dump(tb_url, "hdfs:///Projects/" + project + "/Resources/.jupyter.tensorboard", user=hops_user)
|
|
24788b106b9cdd70e7240dc3eccac82fba290c85
|
tests/util/test_yaml.py
|
tests/util/test_yaml.py
|
"""Test Home Assistant yaml loader."""
import io
import unittest
from homeassistant.util import yaml
class TestYaml(unittest.TestCase):
"""Test util.yaml loader."""
def test_simple_list(self):
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['config'] == ["simple", "list"]
def test_simple_dict(self):
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['key'] == 'value'
def test_duplicate_key(self):
"""Test simple dict."""
conf = "key: thing1\nkey: thing2"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
|
"""Test Home Assistant yaml loader."""
import io
import unittest
import os
from homeassistant.util import yaml
class TestYaml(unittest.TestCase):
"""Test util.yaml loader."""
def test_simple_list(self):
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['config'] == ["simple", "list"]
def test_simple_dict(self):
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['key'] == 'value'
def test_duplicate_key(self):
"""Test simple dict."""
conf = "key: thing1\nkey: thing2"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
def test_enviroment_variable(self):
"""Test config file with enviroment variable."""
os.environ["PASSWORD"] = "secret_password"
conf = "password: !env_var PASSWORD"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['password'] == "secret_password"
del os.environ["PASSWORD"]
def test_invalid_enviroment_variable(self):
"""Test config file with no enviroment variable sat."""
conf = "password: !env_var PASSWORD"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
|
Add test for yaml enviroment
|
Add test for yaml enviroment
|
Python
|
mit
|
lukas-hetzenecker/home-assistant,LinuxChristian/home-assistant,molobrakos/home-assistant,sffjunkie/home-assistant,titilambert/home-assistant,ewandor/home-assistant,emilhetty/home-assistant,mikaelboman/home-assistant,nkgilley/home-assistant,robbiet480/home-assistant,jawilson/home-assistant,molobrakos/home-assistant,devdelay/home-assistant,florianholzapfel/home-assistant,deisi/home-assistant,betrisey/home-assistant,jaharkes/home-assistant,qedi-r/home-assistant,postlund/home-assistant,mezz64/home-assistant,eagleamon/home-assistant,w1ll1am23/home-assistant,varunr047/homefile,balloob/home-assistant,leoc/home-assistant,jabesq/home-assistant,MungoRae/home-assistant,Cinntax/home-assistant,nugget/home-assistant,soldag/home-assistant,dmeulen/home-assistant,happyleavesaoc/home-assistant,devdelay/home-assistant,sffjunkie/home-assistant,kennedyshead/home-assistant,sffjunkie/home-assistant,sander76/home-assistant,hexxter/home-assistant,balloob/home-assistant,Teagan42/home-assistant,DavidLP/home-assistant,Danielhiversen/home-assistant,robjohnson189/home-assistant,morphis/home-assistant,Zac-HD/home-assistant,ct-23/home-assistant,w1ll1am23/home-assistant,Danielhiversen/home-assistant,aronsky/home-assistant,kyvinh/home-assistant,emilhetty/home-assistant,hexxter/home-assistant,hmronline/home-assistant,varunr047/homefile,shaftoe/home-assistant,MartinHjelmare/home-assistant,shaftoe/home-assistant,deisi/home-assistant,joopert/home-assistant,open-homeautomation/home-assistant,open-homeautomation/home-assistant,dmeulen/home-assistant,oandrew/home-assistant,HydrelioxGitHub/home-assistant,deisi/home-assistant,leoc/home-assistant,emilhetty/home-assistant,postlund/home-assistant,ct-23/home-assistant,ct-23/home-assistant,auduny/home-assistant,betrisey/home-assistant,HydrelioxGitHub/home-assistant,PetePriority/home-assistant,leppa/home-assistant,home-assistant/home-assistant,PetePriority/home-assistant,sffjunkie/home-assistant,persandstrom/home-assistant,jabesq/home-assistant,robjohnson189/home-assistant,jamespcole/home-assistant,alexmogavero/home-assistant,oandrew/home-assistant,Smart-Torvy/torvy-home-assistant,philipbl/home-assistant,tchellomello/home-assistant,GenericStudent/home-assistant,rohitranjan1991/home-assistant,stefan-jonasson/home-assistant,xifle/home-assistant,philipbl/home-assistant,auduny/home-assistant,MungoRae/home-assistant,jaharkes/home-assistant,Zac-HD/home-assistant,soldag/home-assistant,tinloaf/home-assistant,bdfoster/blumate,morphis/home-assistant,keerts/home-assistant,morphis/home-assistant,jnewland/home-assistant,Smart-Torvy/torvy-home-assistant,partofthething/home-assistant,alexmogavero/home-assistant,eagleamon/home-assistant,PetePriority/home-assistant,mKeRix/home-assistant,florianholzapfel/home-assistant,tinloaf/home-assistant,kyvinh/home-assistant,nkgilley/home-assistant,persandstrom/home-assistant,kyvinh/home-assistant,Zac-HD/home-assistant,turbokongen/home-assistant,turbokongen/home-assistant,fbradyirl/home-assistant,JshWright/home-assistant,bdfoster/blumate,sffjunkie/home-assistant,partofthething/home-assistant,emilhetty/home-assistant,kennedyshead/home-assistant,aequitas/home-assistant,emilhetty/home-assistant,pschmitt/home-assistant,jabesq/home-assistant,varunr047/homefile,srcLurker/home-assistant,Smart-Torvy/torvy-home-assistant,Julian/home-assistant,toddeye/home-assistant,GenericStudent/home-assistant,mKeRix/home-assistant,srcLurker/home-assistant,rohitranjan1991/home-assistant,robjohnson189/home-assistant,robbiet480/home-assistant,stefan-jonasson/home-assistant,HydrelioxGitHub/home-
assistant,jnewland/home-assistant,happyleavesaoc/home-assistant,adrienbrault/home-assistant,alexmogavero/home-assistant,DavidLP/home-assistant,philipbl/home-assistant,tboyce1/home-assistant,LinuxChristian/home-assistant,hmronline/home-assistant,MungoRae/home-assistant,mikaelboman/home-assistant,Duoxilian/home-assistant,hexxter/home-assistant,JshWright/home-assistant,bdfoster/blumate,fbradyirl/home-assistant,miniconfig/home-assistant,robjohnson189/home-assistant,nugget/home-assistant,Cinntax/home-assistant,betrisey/home-assistant,sdague/home-assistant,adrienbrault/home-assistant,tboyce1/home-assistant,home-assistant/home-assistant,lukas-hetzenecker/home-assistant,sdague/home-assistant,tchellomello/home-assistant,rohitranjan1991/home-assistant,toddeye/home-assistant,mKeRix/home-assistant,mKeRix/home-assistant,eagleamon/home-assistant,mikaelboman/home-assistant,aronsky/home-assistant,oandrew/home-assistant,eagleamon/home-assistant,morphis/home-assistant,ewandor/home-assistant,betrisey/home-assistant,ma314smith/home-assistant,sander76/home-assistant,balloob/home-assistant,jaharkes/home-assistant,dmeulen/home-assistant,miniconfig/home-assistant,keerts/home-assistant,aequitas/home-assistant,FreekingDean/home-assistant,Julian/home-assistant,Zac-HD/home-assistant,oandrew/home-assistant,ma314smith/home-assistant,MungoRae/home-assistant,nugget/home-assistant,open-homeautomation/home-assistant,ewandor/home-assistant,deisi/home-assistant,florianholzapfel/home-assistant,mikaelboman/home-assistant,titilambert/home-assistant,Teagan42/home-assistant,jaharkes/home-assistant,Duoxilian/home-assistant,shaftoe/home-assistant,leppa/home-assistant,keerts/home-assistant,mezz64/home-assistant,jawilson/home-assistant,deisi/home-assistant,jamespcole/home-assistant,Smart-Torvy/torvy-home-assistant,tboyce021/home-assistant,stefan-jonasson/home-assistant,stefan-jonasson/home-assistant,ct-23/home-assistant,DavidLP/home-assistant,auduny/home-assistant,pschmitt/home-assistant,ma314smith/home-assistant,persandstrom/home-assistant,varunr047/homefile,philipbl/home-assistant,joopert/home-assistant,devdelay/home-assistant,FreekingDean/home-assistant,happyleavesaoc/home-assistant,hmronline/home-assistant,xifle/home-assistant,tboyce021/home-assistant,xifle/home-assistant,miniconfig/home-assistant,aequitas/home-assistant,Duoxilian/home-assistant,bdfoster/blumate,JshWright/home-assistant,open-homeautomation/home-assistant,keerts/home-assistant,leoc/home-assistant,ma314smith/home-assistant,srcLurker/home-assistant,jamespcole/home-assistant,LinuxChristian/home-assistant,jnewland/home-assistant,varunr047/homefile,shaftoe/home-assistant,miniconfig/home-assistant,tboyce1/home-assistant,devdelay/home-assistant,happyleavesaoc/home-assistant,MungoRae/home-assistant,MartinHjelmare/home-assistant,hmronline/home-assistant,LinuxChristian/home-assistant,bdfoster/blumate,tinloaf/home-assistant,xifle/home-assistant,srcLurker/home-assistant,Julian/home-assistant,florianholzapfel/home-assistant,molobrakos/home-assistant,LinuxChristian/home-assistant,Julian/home-assistant,tboyce1/home-assistant,mikaelboman/home-assistant,leoc/home-assistant,kyvinh/home-assistant,hexxter/home-assistant,Duoxilian/home-assistant,hmronline/home-assistant,JshWright/home-assistant,MartinHjelmare/home-assistant,alexmogavero/home-assistant,qedi-r/home-assistant,dmeulen/home-assistant,fbradyirl/home-assistant,ct-23/home-assistant
|
"""Test Home Assistant yaml loader."""
import io
import unittest
from homeassistant.util import yaml
class TestYaml(unittest.TestCase):
"""Test util.yaml loader."""
def test_simple_list(self):
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['config'] == ["simple", "list"]
def test_simple_dict(self):
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['key'] == 'value'
def test_duplicate_key(self):
"""Test simple dict."""
conf = "key: thing1\nkey: thing2"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
Add test for yaml enviroment
|
"""Test Home Assistant yaml loader."""
import io
import unittest
import os
from homeassistant.util import yaml
class TestYaml(unittest.TestCase):
"""Test util.yaml loader."""
def test_simple_list(self):
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['config'] == ["simple", "list"]
def test_simple_dict(self):
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['key'] == 'value'
def test_duplicate_key(self):
"""Test simple dict."""
conf = "key: thing1\nkey: thing2"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
def test_enviroment_variable(self):
"""Test config file with enviroment variable."""
os.environ["PASSWORD"] = "secret_password"
conf = "password: !env_var PASSWORD"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['password'] == "secret_password"
del os.environ["PASSWORD"]
def test_invalid_enviroment_variable(self):
"""Test config file with no enviroment variable sat."""
conf = "password: !env_var PASSWORD"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
|
<commit_before>"""Test Home Assistant yaml loader."""
import io
import unittest
from homeassistant.util import yaml
class TestYaml(unittest.TestCase):
"""Test util.yaml loader."""
def test_simple_list(self):
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['config'] == ["simple", "list"]
def test_simple_dict(self):
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['key'] == 'value'
def test_duplicate_key(self):
"""Test simple dict."""
conf = "key: thing1\nkey: thing2"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
<commit_msg>Add test for yaml enviroment<commit_after>
|
"""Test Home Assistant yaml loader."""
import io
import unittest
import os
from homeassistant.util import yaml
class TestYaml(unittest.TestCase):
"""Test util.yaml loader."""
def test_simple_list(self):
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['config'] == ["simple", "list"]
def test_simple_dict(self):
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['key'] == 'value'
def test_duplicate_key(self):
"""Test simple dict."""
conf = "key: thing1\nkey: thing2"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
def test_enviroment_variable(self):
"""Test config file with enviroment variable."""
os.environ["PASSWORD"] = "secret_password"
conf = "password: !env_var PASSWORD"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['password'] == "secret_password"
del os.environ["PASSWORD"]
def test_invalid_enviroment_variable(self):
"""Test config file with no enviroment variable sat."""
conf = "password: !env_var PASSWORD"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
|
"""Test Home Assistant yaml loader."""
import io
import unittest
from homeassistant.util import yaml
class TestYaml(unittest.TestCase):
"""Test util.yaml loader."""
def test_simple_list(self):
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['config'] == ["simple", "list"]
def test_simple_dict(self):
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['key'] == 'value'
def test_duplicate_key(self):
"""Test simple dict."""
conf = "key: thing1\nkey: thing2"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
Add test for yaml enviroment
"""Test Home Assistant yaml loader."""
import io
import unittest
import os
from homeassistant.util import yaml
class TestYaml(unittest.TestCase):
"""Test util.yaml loader."""
def test_simple_list(self):
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['config'] == ["simple", "list"]
def test_simple_dict(self):
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['key'] == 'value'
def test_duplicate_key(self):
"""Test simple dict."""
conf = "key: thing1\nkey: thing2"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
def test_enviroment_variable(self):
"""Test config file with enviroment variable."""
os.environ["PASSWORD"] = "secret_password"
conf = "password: !env_var PASSWORD"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['password'] == "secret_password"
del os.environ["PASSWORD"]
def test_invalid_enviroment_variable(self):
"""Test config file with no enviroment variable sat."""
conf = "password: !env_var PASSWORD"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
|
<commit_before>"""Test Home Assistant yaml loader."""
import io
import unittest
from homeassistant.util import yaml
class TestYaml(unittest.TestCase):
"""Test util.yaml loader."""
def test_simple_list(self):
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['config'] == ["simple", "list"]
def test_simple_dict(self):
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['key'] == 'value'
def test_duplicate_key(self):
"""Test simple dict."""
conf = "key: thing1\nkey: thing2"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
<commit_msg>Add test for yaml enviroment<commit_after>"""Test Home Assistant yaml loader."""
import io
import unittest
import os
from homeassistant.util import yaml
class TestYaml(unittest.TestCase):
"""Test util.yaml loader."""
def test_simple_list(self):
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['config'] == ["simple", "list"]
def test_simple_dict(self):
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['key'] == 'value'
def test_duplicate_key(self):
"""Test simple dict."""
conf = "key: thing1\nkey: thing2"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
def test_enviroment_variable(self):
"""Test config file with enviroment variable."""
os.environ["PASSWORD"] = "secret_password"
conf = "password: !env_var PASSWORD"
with io.StringIO(conf) as f:
doc = yaml.yaml.safe_load(f)
assert doc['password'] == "secret_password"
del os.environ["PASSWORD"]
def test_invalid_enviroment_variable(self):
"""Test config file with no enviroment variable sat."""
conf = "password: !env_var PASSWORD"
try:
with io.StringIO(conf) as f:
yaml.yaml.safe_load(f)
except Exception:
pass
else:
assert 0
|
96f224a6b80720a88fefc8530aea113f975ef110
|
new_layout.py
|
new_layout.py
|
import sublime, sublime_plugin
class NewLayoutCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
self.view.window().run_command("set_layout", args)
self.view.window().run_command("focus_group", { "group": 0 })
self.view.window().run_command("move_to_group", { "group": 1 } )
|
Add new layout window command
|
Add new layout window command
|
Python
|
mit
|
shaochuan/sublime-plugins
|
Add new layout window command
|
import sublime, sublime_plugin
class NewLayoutCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
self.view.window().run_command("set_layout", args)
self.view.window().run_command("focus_group", { "group": 0 })
self.view.window().run_command("move_to_group", { "group": 1 } )
|
<commit_before><commit_msg>Add new layout window command<commit_after>
|
import sublime, sublime_plugin
class NewLayoutCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
self.view.window().run_command("set_layout", args)
self.view.window().run_command("focus_group", { "group": 0 })
self.view.window().run_command("move_to_group", { "group": 1 } )
|
Add new layout window command
import sublime, sublime_plugin
class NewLayoutCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
self.view.window().run_command("set_layout", args)
self.view.window().run_command("focus_group", { "group": 0 })
self.view.window().run_command("move_to_group", { "group": 1 } )
|
<commit_before><commit_msg>Add new layout window command<commit_after>import sublime, sublime_plugin
class NewLayoutCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
self.view.window().run_command("set_layout", args)
self.view.window().run_command("focus_group", { "group": 0 })
self.view.window().run_command("move_to_group", { "group": 1 } )
|
|
325465d18e963400b427f259547d4292a47368c9
|
oneflow/settings/snippets/common_development.py
|
oneflow/settings/snippets/common_development.py
|
#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
|
#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', )
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
Use Django nose for tests.
|
Use Django nose for tests.
|
Python
|
agpl-3.0
|
1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow
|
#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
Use Django nose for tests.
|
#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', )
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
<commit_before>#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
<commit_msg>Use Django nose for tests.<commit_after>
|
#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', )
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
Use Django nose for tests.
#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', )
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
<commit_before>#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
<commit_msg>Use Django nose for tests.<commit_after>#
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
'localhost',
'chani.licorn.org',
'leto.licorn.org',
'gurney.licorn.org'
]
INSTALLED_APPS += ('django_nose', )
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
|
8eeb4c2db613c1354c38696ac6691cf79f66a383
|
locations/spiders/brookdale.py
|
locations/spiders/brookdale.py
|
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
URL = 'https://www.brookdale.com/bin/brookdale/community-search?care_type_category=resident&loc=&finrpt=&state='
US_STATES = (
"AL", "AZ", "AR", "CA", "CO", "CT", "DE", "FL", "GA",
"ID", "IL", "IN", "IA", "KS", "KY", "LA", "MD",
"MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
"NM", "NY", "NC", "OH", "OK", "OR", "PA", "RI", "SC",
"TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY",
)
class TemplateSpider(scrapy.Spider):
name = "brookdale"
allowed_domains = ["www.brookdale.com"]
def start_requests(self):
for state in US_STATES:
url = ''.join([URL, state])
yield scrapy.Request(url, callback=self.parse_info)
def parse_info(self, response):
data = json.loads(response.body_as_unicode())
i = 0
while i < len(data):
print(data[i]['name'])
properties = {
"ref": data[i]['community_id'],
"name": data[i]['name'],
"lat": data[i]['latitude'],
"lon": data[i]['longitude'],
"addr_full": data[i]['address1'],
"city": data[i]['city'],
"state": data[i]['state'],
"country": data[i]['country_code'],
"postcode": data[i]['zip_postal_code'],
"website": data[i]['website'],
"phone": data[i]['contact_center_phone'],
}
yield GeojsonPointItem(**properties)
i += 1
|
Add spider for Brookdale Senior Living
|
Add spider for Brookdale Senior Living
|
Python
|
mit
|
iandees/all-the-places,iandees/all-the-places,iandees/all-the-places
|
Add spider for Brookdale Senior Living
|
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
URL = 'https://www.brookdale.com/bin/brookdale/community-search?care_type_category=resident&loc=&finrpt=&state='
US_STATES = (
"AL", "AZ", "AR", "CA", "CO", "CT", "DE", "FL", "GA",
"ID", "IL", "IN", "IA", "KS", "KY", "LA", "MD",
"MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
"NM", "NY", "NC", "OH", "OK", "OR", "PA", "RI", "SC",
"TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY",
)
class TemplateSpider(scrapy.Spider):
name = "brookdale"
allowed_domains = ["www.brookdale.com"]
def start_requests(self):
for state in US_STATES:
url = ''.join([URL, state])
yield scrapy.Request(url, callback=self.parse_info)
def parse_info(self, response):
data = json.loads(response.body_as_unicode())
i = 0
while i < len(data):
print(data[i]['name'])
properties = {
"ref": data[i]['community_id'],
"name": data[i]['name'],
"lat": data[i]['latitude'],
"lon": data[i]['longitude'],
"addr_full": data[i]['address1'],
"city": data[i]['city'],
"state": data[i]['state'],
"country": data[i]['country_code'],
"postcode": data[i]['zip_postal_code'],
"website": data[i]['website'],
"phone": data[i]['contact_center_phone'],
}
yield GeojsonPointItem(**properties)
i += 1
|
<commit_before><commit_msg>Add spider for Brookdale Senior Living<commit_after>
|
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
URL = 'https://www.brookdale.com/bin/brookdale/community-search?care_type_category=resident&loc=&finrpt=&state='
US_STATES = (
"AL", "AZ", "AR", "CA", "CO", "CT", "DE", "FL", "GA",
"ID", "IL", "IN", "IA", "KS", "KY", "LA", "MD",
"MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
"NM", "NY", "NC", "OH", "OK", "OR", "PA", "RI", "SC",
"TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY",
)
class TemplateSpider(scrapy.Spider):
name = "brookdale"
allowed_domains = ["www.brookdale.com"]
def start_requests(self):
for state in US_STATES:
url = ''.join([URL, state])
yield scrapy.Request(url, callback=self.parse_info)
def parse_info(self, response):
data = json.loads(response.body_as_unicode())
i = 0
while i < len(data):
print(data[i]['name'])
properties = {
"ref": data[i]['community_id'],
"name": data[i]['name'],
"lat": data[i]['latitude'],
"lon": data[i]['longitude'],
"addr_full": data[i]['address1'],
"city": data[i]['city'],
"state": data[i]['state'],
"country": data[i]['country_code'],
"postcode": data[i]['zip_postal_code'],
"website": data[i]['website'],
"phone": data[i]['contact_center_phone'],
}
yield GeojsonPointItem(**properties)
i += 1
|
Add spider for Brookdale Senior Living
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
URL = 'https://www.brookdale.com/bin/brookdale/community-search?care_type_category=resident&loc=&finrpt=&state='
US_STATES = (
"AL", "AZ", "AR", "CA", "CO", "CT", "DE", "FL", "GA",
"ID", "IL", "IN", "IA", "KS", "KY", "LA", "MD",
"MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
"NM", "NY", "NC", "OH", "OK", "OR", "PA", "RI", "SC",
"TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY",
)
class TemplateSpider(scrapy.Spider):
name = "brookdale"
allowed_domains = ["www.brookdale.com"]
def start_requests(self):
for state in US_STATES:
url = ''.join([URL, state])
yield scrapy.Request(url, callback=self.parse_info)
def parse_info(self, response):
data = json.loads(response.body_as_unicode())
i = 0
while i < len(data):
print(data[i]['name'])
properties = {
"ref": data[i]['community_id'],
"name": data[i]['name'],
"lat": data[i]['latitude'],
"lon": data[i]['longitude'],
"addr_full": data[i]['address1'],
"city": data[i]['city'],
"state": data[i]['state'],
"country": data[i]['country_code'],
"postcode": data[i]['zip_postal_code'],
"website": data[i]['website'],
"phone": data[i]['contact_center_phone'],
}
yield GeojsonPointItem(**properties)
i += 1
|
<commit_before><commit_msg>Add spider for Brookdale Senior Living<commit_after># -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
URL = 'https://www.brookdale.com/bin/brookdale/community-search?care_type_category=resident&loc=&finrpt=&state='
US_STATES = (
"AL", "AZ", "AR", "CA", "CO", "CT", "DE", "FL", "GA",
"ID", "IL", "IN", "IA", "KS", "KY", "LA", "MD",
"MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
"NM", "NY", "NC", "OH", "OK", "OR", "PA", "RI", "SC",
"TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY",
)
class TemplateSpider(scrapy.Spider):
name = "brookdale"
allowed_domains = ["www.brookdale.com"]
def start_requests(self):
for state in US_STATES:
url = ''.join([URL, state])
yield scrapy.Request(url, callback=self.parse_info)
def parse_info(self, response):
data = json.loads(response.body_as_unicode())
i = 0
while i < len(data):
print(data[i]['name'])
properties = {
"ref": data[i]['community_id'],
"name": data[i]['name'],
"lat": data[i]['latitude'],
"lon": data[i]['longitude'],
"addr_full": data[i]['address1'],
"city": data[i]['city'],
"state": data[i]['state'],
"country": data[i]['country_code'],
"postcode": data[i]['zip_postal_code'],
"website": data[i]['website'],
"phone": data[i]['contact_center_phone'],
}
yield GeojsonPointItem(**properties)
i += 1
|
|
0403d6f78189be3f3b22f068dad1db0c53687ef7
|
ptch/__init__.py
|
ptch/__init__.py
|
# -*- coding: utf-8 -*-
"""
PTCH files are a container format for Blizzard patch files.
They begin with a 72 byte header containing some metadata, immediately
followed by a RLE-packed BSDIFF40.
The original BSDIFF40 format is compressed with bzip2 instead of RLE.
"""
#from hashlib import md5
from struct import unpack
from binascii import hexlify
from cStringIO import StringIO
class PatchFile(object):
def __init__(self, file):
# Parse the header
file.seek(0)
assert file.read(4) == "PTCH"
unk1 = file.read(4)
self.sizeBefore, self.sizeAfter = unpack("ii", file.read(8))
assert file.read(4) == "MD5_"
assert unpack("i", file.read(4)) == (0x28, )
self.md5Before, self.md5After = unpack("16s16s", file.read(32))
self.md5Before, self.md5After = hexlify(self.md5Before), hexlify(self.md5After)
assert file.read(4) == "XFRM"
file.read(4)
assert file.read(4) == "BSD0"
self.fileSize, = unpack("i", file.read(4))
self.compressedDiff = file.read()
file.close()
def __repr__(self):
header = ("sizeBefore", "sizeAfter", "md5Before", "md5After", "fileSize")
return "%s(%s)" % (self.__class__.__name__, ", ".join("%s=%r" % (k, getattr(self, k)) for k in header))
def rleUnpack(self):
"""
Read the RLE-packed data and
return the unpacked output.
"""
data = StringIO(self.compressedDiff)
ret = []
byte = data.read(1)
while byte:
byte = ord(byte)
# Is it a repeat control?
if byte & 0x80:
count = (byte & 0x7F) + 1
ret.append(data.read(count))
else:
ret.append("\0" * (byte+1))
byte = data.read(1)
return "".join(ret)
|
Add ptch module and base PatchFile class. This class can unpack RLE-compressed patchfiles.
|
ptch: Add ptch module and base PatchFile class. This class can unpack RLE-compressed patchfiles.
|
Python
|
cc0-1.0
|
jleclanche/pywow,jleclanche/pywow,jleclanche/pywow,jleclanche/pywow,jleclanche/pywow,jleclanche/pywow
|
ptch: Add ptch module and base PatchFile class. This class can unpack RLE-compressed patchfiles.
|
# -*- coding: utf-8 -*-
"""
PTCH files are a container format for Blizzard patch files.
They begin with a 72 byte header containing some metadata, immediately
followed by a RLE-packed BSDIFF40.
The original BSDIFF40 format is compressed with bzip2 instead of RLE.
"""
#from hashlib import md5
from struct import unpack
from binascii import hexlify
from cStringIO import StringIO
class PatchFile(object):
def __init__(self, file):
# Parse the header
file.seek(0)
assert file.read(4) == "PTCH"
unk1 = file.read(4)
self.sizeBefore, self.sizeAfter = unpack("ii", file.read(8))
assert file.read(4) == "MD5_"
assert unpack("i", file.read(4)) == (0x28, )
self.md5Before, self.md5After = unpack("16s16s", file.read(32))
self.md5Before, self.md5After = hexlify(self.md5Before), hexlify(self.md5After)
assert file.read(4) == "XFRM"
file.read(4)
assert file.read(4) == "BSD0"
self.fileSize, = unpack("i", file.read(4))
self.compressedDiff = file.read()
file.close()
def __repr__(self):
header = ("sizeBefore", "sizeAfter", "md5Before", "md5After", "fileSize")
return "%s(%s)" % (self.__class__.__name__, ", ".join("%s=%r" % (k, getattr(self, k)) for k in header))
def rleUnpack(self):
"""
Read the RLE-packed data and
return the unpacked output.
"""
data = StringIO(self.compressedDiff)
ret = []
byte = data.read(1)
while byte:
byte = ord(byte)
# Is it a repeat control?
if byte & 0x80:
count = (byte & 0x7F) + 1
ret.append(data.read(count))
else:
ret.append("\0" * (byte+1))
byte = data.read(1)
return "".join(ret)
|
<commit_before><commit_msg>ptch: Add ptch module and base PatchFile class. This class can unpack RLE-compressed patchfiles.<commit_after>
|
# -*- coding: utf-8 -*-
"""
PTCH files are a container format for Blizzard patch files.
They begin with a 72 byte header containing some metadata, immediately
followed by a RLE-packed BSDIFF40.
The original BSDIFF40 format is compressed with bzip2 instead of RLE.
"""
#from hashlib import md5
from struct import unpack
from binascii import hexlify
from cStringIO import StringIO
class PatchFile(object):
def __init__(self, file):
# Parse the header
file.seek(0)
assert file.read(4) == "PTCH"
unk1 = file.read(4)
self.sizeBefore, self.sizeAfter = unpack("ii", file.read(8))
assert file.read(4) == "MD5_"
assert unpack("i", file.read(4)) == (0x28, )
self.md5Before, self.md5After = unpack("16s16s", file.read(32))
self.md5Before, self.md5After = hexlify(self.md5Before), hexlify(self.md5After)
assert file.read(4) == "XFRM"
file.read(4)
assert file.read(4) == "BSD0"
self.fileSize, = unpack("i", file.read(4))
self.compressedDiff = file.read()
file.close()
def __repr__(self):
header = ("sizeBefore", "sizeAfter", "md5Before", "md5After", "fileSize")
return "%s(%s)" % (self.__class__.__name__, ", ".join("%s=%r" % (k, getattr(self, k)) for k in header))
def rleUnpack(self):
"""
Read the RLE-packed data and
return the unpacked output.
"""
data = StringIO(self.compressedDiff)
ret = []
byte = data.read(1)
while byte:
byte = ord(byte)
# Is it a repeat control?
if byte & 0x80:
count = (byte & 0x7F) + 1
ret.append(data.read(count))
else:
ret.append("\0" * (byte+1))
byte = data.read(1)
return "".join(ret)
|
ptch: Add ptch module and base PatchFile class. This class can unpack RLE-compressed patchfiles.
# -*- coding: utf-8 -*-
"""
PTCH files are a container format for Blizzard patch files.
They begin with a 72 byte header containing some metadata, immediately
followed by a RLE-packed BSDIFF40.
The original BSDIFF40 format is compressed with bzip2 instead of RLE.
"""
#from hashlib import md5
from struct import unpack
from binascii import hexlify
from cStringIO import StringIO
class PatchFile(object):
def __init__(self, file):
# Parse the header
file.seek(0)
assert file.read(4) == "PTCH"
unk1 = file.read(4)
self.sizeBefore, self.sizeAfter = unpack("ii", file.read(8))
assert file.read(4) == "MD5_"
assert unpack("i", file.read(4)) == (0x28, )
self.md5Before, self.md5After = unpack("16s16s", file.read(32))
self.md5Before, self.md5After = hexlify(self.md5Before), hexlify(self.md5After)
assert file.read(4) == "XFRM"
file.read(4)
assert file.read(4) == "BSD0"
self.fileSize, = unpack("i", file.read(4))
self.compressedDiff = file.read()
file.close()
def __repr__(self):
header = ("sizeBefore", "sizeAfter", "md5Before", "md5After", "fileSize")
return "%s(%s)" % (self.__class__.__name__, ", ".join("%s=%r" % (k, getattr(self, k)) for k in header))
def rleUnpack(self):
"""
Read the RLE-packed data and
return the unpacked output.
"""
data = StringIO(self.compressedDiff)
ret = []
byte = data.read(1)
while byte:
byte = ord(byte)
# Is it a repeat control?
if byte & 0x80:
count = (byte & 0x7F) + 1
ret.append(data.read(count))
else:
ret.append("\0" * (byte+1))
byte = data.read(1)
return "".join(ret)
|
<commit_before><commit_msg>ptch: Add ptch module and base PatchFile class. This class can unpack RLE-compressed patchfiles.<commit_after># -*- coding: utf-8 -*-
"""
PTCH files are a container format for Blizzard patch files.
They begin with a 72 byte header containing some metadata, immediately
followed by a RLE-packed BSDIFF40.
The original BSDIFF40 format is compressed with bzip2 instead of RLE.
"""
#from hashlib import md5
from struct import unpack
from binascii import hexlify
from cStringIO import StringIO
class PatchFile(object):
def __init__(self, file):
# Parse the header
file.seek(0)
assert file.read(4) == "PTCH"
unk1 = file.read(4)
self.sizeBefore, self.sizeAfter = unpack("ii", file.read(8))
assert file.read(4) == "MD5_"
assert unpack("i", file.read(4)) == (0x28, )
self.md5Before, self.md5After = unpack("16s16s", file.read(32))
self.md5Before, self.md5After = hexlify(self.md5Before), hexlify(self.md5After)
assert file.read(4) == "XFRM"
file.read(4)
assert file.read(4) == "BSD0"
self.fileSize, = unpack("i", file.read(4))
self.compressedDiff = file.read()
file.close()
def __repr__(self):
header = ("sizeBefore", "sizeAfter", "md5Before", "md5After", "fileSize")
return "%s(%s)" % (self.__class__.__name__, ", ".join("%s=%r" % (k, getattr(self, k)) for k in header))
def rleUnpack(self):
"""
Read the RLE-packed data and
return the unpacked output.
"""
data = StringIO(self.compressedDiff)
ret = []
byte = data.read(1)
while byte:
byte = ord(byte)
# Is it a repeat control?
if byte & 0x80:
count = (byte & 0x7F) + 1
ret.append(data.read(count))
else:
ret.append("\0" * (byte+1))
byte = data.read(1)
return "".join(ret)
|
|
841e8fe236eab35b803cb9d8bec201306ce4642e
|
util/repeat_rum_file.py
|
util/repeat_rum_file.py
|
from rum_mapping_stats import aln_iter
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--times', type=int)
parser.add_argument('--max-seq', type=int)
parser.add_argument('rum_file', type=file)
args = parser.parse_args()
alns = list(aln_iter(args.rum_file))
for t in range(args.times):
for aln in alns:
old_read_num = aln.read_num
aln.read_num = old_read_num + t * args.max_seq
aln.write(sys.stdout)
aln.read_num = old_read_num
|
Add script to generate big RUM_* files
|
Add script to generate big RUM_* files
|
Python
|
mit
|
itmat/rum,itmat/rum,itmat/rum
|
Add script to generate big RUM_* files
|
from rum_mapping_stats import aln_iter
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--times', type=int)
parser.add_argument('--max-seq', type=int)
parser.add_argument('rum_file', type=file)
args = parser.parse_args()
alns = list(aln_iter(args.rum_file))
for t in range(args.times):
for aln in alns:
old_read_num = aln.read_num
aln.read_num = old_read_num + t * args.max_seq
aln.write(sys.stdout)
aln.read_num = old_read_num
|
<commit_before><commit_msg>Add script to generate big RUM_* files<commit_after>
|
from rum_mapping_stats import aln_iter
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--times', type=int)
parser.add_argument('--max-seq', type=int)
parser.add_argument('rum_file', type=file)
args = parser.parse_args()
alns = list(aln_iter(args.rum_file))
for t in range(args.times):
for aln in alns:
old_read_num = aln.read_num
aln.read_num = old_read_num + t * args.max_seq
aln.write(sys.stdout)
aln.read_num = old_read_num
|
Add script to generate big RUM_* filesfrom rum_mapping_stats import aln_iter
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--times', type=int)
parser.add_argument('--max-seq', type=int)
parser.add_argument('rum_file', type=file)
args = parser.parse_args()
alns = list(aln_iter(args.rum_file))
for t in range(args.times):
for aln in alns:
old_read_num = aln.read_num
aln.read_num = old_read_num + t * args.max_seq
aln.write(sys.stdout)
aln.read_num = old_read_num
|
<commit_before><commit_msg>Add script to generate big RUM_* files<commit_after>from rum_mapping_stats import aln_iter
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--times', type=int)
parser.add_argument('--max-seq', type=int)
parser.add_argument('rum_file', type=file)
args = parser.parse_args()
alns = list(aln_iter(args.rum_file))
for t in range(args.times):
for aln in alns:
old_read_num = aln.read_num
aln.read_num = old_read_num + t * args.max_seq
aln.write(sys.stdout)
aln.read_num = old_read_num
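An invocation sketch for the script above (the file names and counts are assumptions, chosen only to illustrate the flags):

# Repeat every alignment 10 times, offsetting read numbers by the original
# read count so IDs stay unique in the enlarged file.
#   python util/repeat_rum_file.py --times 10 --max-seq 1000000 RUM_Unique > RUM_Unique.big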
|
|
7060b82030d719cdcbdcecdb5eb7d34b405aa805
|
platforms/migrations/0003_auto_20150718_0050.py
|
platforms/migrations/0003_auto_20150718_0050.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('platforms', '0002_auto_20150718_0042'),
]
operations = [
migrations.AlterField(
model_name='platform',
name='default_installer',
field=jsonfield.fields.JSONField(null=True),
),
]
|
Make the migration for previous commit
|
Make the migration for previous commit
|
Python
|
agpl-3.0
|
Turupawn/website,lutris/website,lutris/website,Turupawn/website,Turupawn/website,lutris/website,lutris/website,Turupawn/website
|
Make the migration for previous commit
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('platforms', '0002_auto_20150718_0042'),
]
operations = [
migrations.AlterField(
model_name='platform',
name='default_installer',
field=jsonfield.fields.JSONField(null=True),
),
]
|
<commit_before><commit_msg>Make the migration for previous commit<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('platforms', '0002_auto_20150718_0042'),
]
operations = [
migrations.AlterField(
model_name='platform',
name='default_installer',
field=jsonfield.fields.JSONField(null=True),
),
]
|
Make the migration for previous commit# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('platforms', '0002_auto_20150718_0042'),
]
operations = [
migrations.AlterField(
model_name='platform',
name='default_installer',
field=jsonfield.fields.JSONField(null=True),
),
]
|
<commit_before><commit_msg>Make the migration for previous commit<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('platforms', '0002_auto_20150718_0042'),
]
operations = [
migrations.AlterField(
model_name='platform',
name='default_installer',
field=jsonfield.fields.JSONField(null=True),
),
]
|
|
49b1de4a68133e618723f96f2dc922b311bdd982
|
util/encode_raw.py
|
util/encode_raw.py
|
#!/usr/bin/env python
# Converts raw RGB565 video to MP4/AVI
from sys import argv, exit
from array import array
from subprocess import call
buf=None
TMP_FILE = "/tmp/video.raw"
if (len(argv) != 4):
print("Usage: encode_raw input.raw output.avi fps")
exit(1)
with open(argv[1], "rb") as f:
buf = array("H", f.read())
#Swap not needed if rgb565be is supported
buf.byteswap()
with open(TMP_FILE, "wb") as f:
f.write(buf.tostring())
cmd = "ffmpeg -vcodec rawvideo -r %d -f rawvideo -pix_fmt rgb565 -s 160x120 -i %s -vcodec mpeg4 %s"%(int(argv[3]), TMP_FILE, argv[2])
call(cmd.split())
|
Add Script to encode raw RGB565
|
Add Script to encode raw RGB565
* Encodes RGB565 raw stream to MP4/AVI
|
Python
|
mit
|
SmartArduino/openmv,SmartArduino/openmv,iabdalkader/openmv,kwagyeman/openmv,kwagyeman/openmv,tianzhihen/openmv,kwagyeman/openmv,SmartArduino/openmv,openmv/openmv,openmv/openmv,tianzhihen/openmv,iabdalkader/openmv,kwagyeman/openmv,tianzhihen/openmv,iabdalkader/openmv,iabdalkader/openmv,SmartArduino/openmv,openmv/openmv,openmv/openmv,tianzhihen/openmv
|
Add Script to encode raw RGB565
* Encodes RGB565 raw stream to MP4/AVI
|
#!/usr/bin/env python
# Converts raw RGB565 video to MP4/AVI
from sys import argv, exit
from array import array
from subprocess import call
buf=None
TMP_FILE = "/tmp/video.raw"
if (len(argv) != 4):
print("Usage: encode_raw input.raw output.avi fps")
exit(1)
with open(argv[1], "rb") as f:
buf = array("H", f.read())
#Swap not needed if rgb565be is supported
buf.byteswap()
with open(TMP_FILE, "wb") as f:
f.write(buf.tostring())
cmd = "ffmpeg -vcodec rawvideo -r %d -f rawvideo -pix_fmt rgb565 -s 160x120 -i %s -vcodec mpeg4 %s"%(int(argv[3]), TMP_FILE, argv[2])
call(cmd.split())
|
<commit_before><commit_msg>Add Script to encode raw RGB565
* Encodes RGB565 raw stream to MP4/AVI<commit_after>
|
#!/usr/bin/env python
# Converts raw RGB565 video to MP4/AVI
from sys import argv, exit
from array import array
from subprocess import call
buf=None
TMP_FILE = "/tmp/video.raw"
if (len(argv) != 4):
print("Usage: encode_raw input.raw output.avi fps")
exit(1)
with open(argv[1], "rb") as f:
buf = array("H", f.read())
#Swap not needed if rgb565be is supported
buf.byteswap()
with open(TMP_FILE, "wb") as f:
f.write(buf.tostring())
cmd = "ffmpeg -vcodec rawvideo -r %d -f rawvideo -pix_fmt rgb565 -s 160x120 -i %s -vcodec mpeg4 %s"%(int(argv[3]), TMP_FILE, argv[2])
call(cmd.split())
|
Add Script to encode raw RGB565
* Encodes RGB565 raw stream to MP4/AVI#!/usr/bin/env python
# Converts raw RGB565 video to MP4/AVI
from sys import argv, exit
from array import array
from subprocess import call
buf=None
TMP_FILE = "/tmp/video.raw"
if (len(argv) != 4):
print("Usage: encode_raw input.raw output.avi fps")
exit(1)
with open(argv[1], "rb") as f:
buf = array("H", f.read())
#Swap not needed if rgb565be is supported
buf.byteswap()
with open(TMP_FILE, "wb") as f:
f.write(buf.tostring())
cmd = "ffmpeg -vcodec rawvideo -r %d -f rawvideo -pix_fmt rgb565 -s 160x120 -i %s -vcodec mpeg4 %s"%(int(argv[3]), TMP_FILE, argv[2])
call(cmd.split())
|
<commit_before><commit_msg>Add Script to encode raw RGB565
* Encodes RGB565 raw stream to MP4/AVI<commit_after>#!/usr/bin/env python
# Converts raw RGB565 video to MP4/AVI
from sys import argv, exit
from array import array
from subprocess import call
buf=None
TMP_FILE = "/tmp/video.raw"
if (len(argv) != 4):
print("Usage: encode_raw input.raw output.avi fps")
exit(1)
with open(argv[1], "rb") as f:
buf = array("H", f.read())
#Swap not needed if rgb565be is supported
buf.byteswap()
with open(TMP_FILE, "wb") as f:
f.write(buf.tostring())
cmd = "ffmpeg -vcodec rawvideo -r %d -f rawvideo -pix_fmt rgb565 -s 160x120 -i %s -vcodec mpeg4 %s"%(int(argv[3]), TMP_FILE, argv[2])
call(cmd.split())
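An invocation sketch matching the usage string the script prints (the concrete file names and frame rate are assumptions):

# Usage: encode_raw input.raw output.avi fps
#   python util/encode_raw.py capture.raw capture.avi 30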
|
|
74354263acb3399295e7fde18d6aeed4b7bb7397
|
what_transcode/tests.py
|
what_transcode/tests.py
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from what_transcode.utils import get_mp3_ids
class UtilsTests(TestCase):
def test_get_mp3_ids(self):
what_group = {
'torrents': [
{
'id': 0,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 1,
'format': 'MP3',
'encoding': '320',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 2,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': True,
'remasterCatalogueNumber': 'catno',
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 3,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 4,
'format': 'MP3',
'encoding': 'V0 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 5,
'format': 'MP3',
'encoding': 'V2 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
]
}
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][0]
}), {'320': 1})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][2]
}), {})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][3]
}), {'V0': 4, 'V2': 5})
|
Fix maybe all flake8 errors. Add first test.
|
Fix maybe all flake8 errors. Add first test.
|
Python
|
mit
|
grandmasterchef/WhatManager2,MADindustries/WhatManager2,grandmasterchef/WhatManager2,karamanolev/WhatManager2,davols/WhatManager2,MADindustries/WhatManager2,karamanolev/WhatManager2,MADindustries/WhatManager2,grandmasterchef/WhatManager2,davols/WhatManager2,MADindustries/WhatManager2,karamanolev/WhatManager2,grandmasterchef/WhatManager2,karamanolev/WhatManager2,davols/WhatManager2
|
Fix maybe all flake8 errors. Add first test.
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from what_transcode.utils import get_mp3_ids
class UtilsTests(TestCase):
def test_get_mp3_ids(self):
what_group = {
'torrents': [
{
'id': 0,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 1,
'format': 'MP3',
'encoding': '320',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 2,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': True,
'remasterCatalogueNumber': 'catno',
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 3,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 4,
'format': 'MP3',
'encoding': 'V0 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 5,
'format': 'MP3',
'encoding': 'V2 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
]
}
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][0]
}), {'320': 1})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][2]
}), {})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][3]
}), {'V0': 4, 'V2': 5})
|
<commit_before><commit_msg>Fix maybe all flake8 errors. Add first test.<commit_after>
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from what_transcode.utils import get_mp3_ids
class UtilsTests(TestCase):
def test_get_mp3_ids(self):
what_group = {
'torrents': [
{
'id': 0,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 1,
'format': 'MP3',
'encoding': '320',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 2,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': True,
'remasterCatalogueNumber': 'catno',
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 3,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 4,
'format': 'MP3',
'encoding': 'V0 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 5,
'format': 'MP3',
'encoding': 'V2 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
]
}
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][0]
}), {'320': 1})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][2]
}), {})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][3]
}), {'V0': 4, 'V2': 5})
|
Fix maybe all flake8 errors. Add first test."""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from what_transcode.utils import get_mp3_ids
class UtilsTests(TestCase):
def test_get_mp3_ids(self):
what_group = {
'torrents': [
{
'id': 0,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 1,
'format': 'MP3',
'encoding': '320',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 2,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': True,
'remasterCatalogueNumber': 'catno',
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 3,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 4,
'format': 'MP3',
'encoding': 'V0 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 5,
'format': 'MP3',
'encoding': 'V2 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
]
}
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][0]
}), {'320': 1})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][2]
}), {})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][3]
}), {'V0': 4, 'V2': 5})
|
<commit_before><commit_msg>Fix maybe all flake8 errors. Add first test.<commit_after>"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from what_transcode.utils import get_mp3_ids
class UtilsTests(TestCase):
def test_get_mp3_ids(self):
what_group = {
'torrents': [
{
'id': 0,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 1,
'format': 'MP3',
'encoding': '320',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 2,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': True,
'remasterCatalogueNumber': 'catno',
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 3,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 4,
'format': 'MP3',
'encoding': 'V0 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 5,
'format': 'MP3',
'encoding': 'V2 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
]
}
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][0]
}), {'320': 1})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][2]
}), {})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][3]
}), {'V0': 4, 'V2': 5})
|
|
cb9166c4564c4e763e1214355dc76cbe6d466258
|
books/migrations/0009_auto_20141127_1718.py
|
books/migrations/0009_auto_20141127_1718.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_sections(apps, schema_editor):
# Don't just use books.models.Section, that could be out of date
Section = apps.get_model('books', 'Section')
FRONT_MATTER_CHOICES = [
#('db_value', 'human readable'),
('half_title', 'Half title'),
('title_page', 'Title Page'),
('colophon', 'Colophon'),
('contents', 'Contents'),
('foreward', 'Foreward'),
('preface', 'Preface'),
('acknowledgment', 'Acknowlegment'),
('introduction', 'Introduction'),
('dedication', 'Dedication'),
('epigraph', 'Epigraph'),
('prologue', 'Prologue'),
]
BACK_MATTER_CHOICES = [
('epilogue', 'Epilogue'),
('afterward', 'Afterward'),
('conclusion', 'Conclusion'),
('postscript', 'Postscript'),
('appendix', 'Appendix'),
('glossary', 'Glossary'),
('bibliography', 'Bibliography'),
('index', 'Index'),
('colophon', 'Colophon'),
]
for order, (sect_name, _) in enumerate(FRONT_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='front')
sect.save()
for order, (sect_name, _) in enumerate(BACK_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='back')
sect.save()
def remove_sections(apps, schema_editor):
""" Just make the migration reversible, by calling this function. """
Section = apps.get_model('books', 'Section')
for section in Section.objects.all():
section.delete()
class Migration(migrations.Migration):
dependencies = [
('books', '0008_auto_20141127_1657'),
]
operations = [
migrations.RunPython(add_sections, remove_sections),
]
|
Add data migration for section
|
Add data migration for section
|
Python
|
mit
|
supermitch/simple-author
|
Add data migration for section
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_sections(apps, schema_editor):
# Don't just use books.models.Section, that could be out of date
Section = apps.get_model('books', 'Section')
FRONT_MATTER_CHOICES = [
#('db_value', 'human readable'),
('half_title', 'Half title'),
('title_page', 'Title Page'),
('colophon', 'Colophon'),
('contents', 'Contents'),
('foreward', 'Foreward'),
('preface', 'Preface'),
('acknowledgment', 'Acknowlegment'),
('introduction', 'Introduction'),
('dedication', 'Dedication'),
('epigraph', 'Epigraph'),
('prologue', 'Prologue'),
]
BACK_MATTER_CHOICES = [
('epilogue', 'Epilogue'),
('afterward', 'Afterward'),
('conclusion', 'Conclusion'),
('postscript', 'Postscript'),
('appendix', 'Appendix'),
('glossary', 'Glossary'),
('bibliography', 'Bibliography'),
('index', 'Index'),
('colophon', 'Colophon'),
]
for order, (sect_name, _) in enumerate(FRONT_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='front')
sect.save()
for order, (sect_name, _) in enumerate(BACK_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='back')
sect.save()
def remove_sections(apps, schema_editor):
""" Just make the migration reversible, by calling this function. """
Section = apps.get_model('books', 'Section')
for section in Section.objects.all():
section.delete()
class Migration(migrations.Migration):
dependencies = [
('books', '0008_auto_20141127_1657'),
]
operations = [
migrations.RunPython(add_sections, remove_sections),
]
|
<commit_before><commit_msg>Add data migration for section<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_sections(apps, schema_editor):
# Don't just use books.models.Section, that could be out of date
Section = apps.get_model('books', 'Section')
FRONT_MATTER_CHOICES = [
#('db_value', 'human readable'),
('half_title', 'Half title'),
('title_page', 'Title Page'),
('colophon', 'Colophon'),
('contents', 'Contents'),
('foreward', 'Foreward'),
('preface', 'Preface'),
('acknowledgment', 'Acknowlegment'),
('introduction', 'Introduction'),
('dedication', 'Dedication'),
('epigraph', 'Epigraph'),
('prologue', 'Prologue'),
]
BACK_MATTER_CHOICES = [
('epilogue', 'Epilogue'),
('afterward', 'Afterward'),
('conclusion', 'Conclusion'),
('postscript', 'Postscript'),
('appendix', 'Appendix'),
('glossary', 'Glossary'),
('bibliography', 'Bibliography'),
('index', 'Index'),
('colophon', 'Colophon'),
]
for order, (sect_name, _) in enumerate(FRONT_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='front')
sect.save()
for order, (sect_name, _) in enumerate(BACK_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='back')
sect.save()
def remove_sections(apps, schema_editor):
""" Just make the migration reversible, by calling this function. """
Section = apps.get_model('books', 'Section')
for section in Section.objects.all():
section.delete()
class Migration(migrations.Migration):
dependencies = [
('books', '0008_auto_20141127_1657'),
]
operations = [
migrations.RunPython(add_sections, remove_sections),
]
|
Add data migration for section# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_sections(apps, schema_editor):
# Don't just use books.models.Section, that could be out of date
Section = apps.get_model('books', 'Section')
FRONT_MATTER_CHOICES = [
#('db_value', 'human readable'),
('half_title', 'Half title'),
('title_page', 'Title Page'),
('colophon', 'Colophon'),
('contents', 'Contents'),
('foreward', 'Foreward'),
('preface', 'Preface'),
('acknowledgment', 'Acknowlegment'),
('introduction', 'Introduction'),
('dedication', 'Dedication'),
('epigraph', 'Epigraph'),
('prologue', 'Prologue'),
]
BACK_MATTER_CHOICES = [
('epilogue', 'Epilogue'),
('afterward', 'Afterward'),
('conclusion', 'Conclusion'),
('postscript', 'Postscript'),
('appendix', 'Appendix'),
('glossary', 'Glossary'),
('bibliography', 'Bibliography'),
('index', 'Index'),
('colophon', 'Colophon'),
]
for order, (sect_name, _) in enumerate(FRONT_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='front')
sect.save()
for order, (sect_name, _) in enumerate(BACK_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='back')
sect.save()
def remove_sections(apps, schema_editor):
""" Just make the migration reversible, by calling this function. """
Section = apps.get_model('books', 'Section')
for section in Section.objects.all():
section.delete()
class Migration(migrations.Migration):
dependencies = [
('books', '0008_auto_20141127_1657'),
]
operations = [
migrations.RunPython(add_sections, remove_sections),
]
|
<commit_before><commit_msg>Add data migration for section<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_sections(apps, schema_editor):
# Don't just use books.models.Section, that could be out of date
Section = apps.get_model('books', 'Section')
FRONT_MATTER_CHOICES = [
#('db_value', 'human readable'),
('half_title', 'Half title'),
('title_page', 'Title Page'),
('colophon', 'Colophon'),
('contents', 'Contents'),
('foreward', 'Foreward'),
('preface', 'Preface'),
('acknowledgment', 'Acknowlegment'),
('introduction', 'Introduction'),
('dedication', 'Dedication'),
('epigraph', 'Epigraph'),
('prologue', 'Prologue'),
]
BACK_MATTER_CHOICES = [
('epilogue', 'Epilogue'),
('afterward', 'Afterward'),
('conclusion', 'Conclusion'),
('postscript', 'Postscript'),
('appendix', 'Appendix'),
('glossary', 'Glossary'),
('bibliography', 'Bibliography'),
('index', 'Index'),
('colophon', 'Colophon'),
]
for order, (sect_name, _) in enumerate(FRONT_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='front')
sect.save()
for order, (sect_name, _) in enumerate(BACK_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='back')
sect.save()
def remove_sections(apps, schema_editor):
""" Just make the migration reversible, by calling this function. """
Section = apps.get_model('books', 'Section')
for section in Section.objects.all():
section.delete()
class Migration(migrations.Migration):
dependencies = [
('books', '0008_auto_20141127_1657'),
]
operations = [
migrations.RunPython(add_sections, remove_sections),
]
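A hedged note on applying and reversing this data migration with the standard Django commands (app label and migration names are taken from the file itself):

#   python manage.py migrate books                            # runs add_sections, loads front/back matter
#   python manage.py migrate books 0008_auto_20141127_1657    # reverses via remove_sections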
|
|
ebd62eac70d5589b0b7f593009024868f981e658
|
calvin/actorstore/systemactors/std/ClassicDelay.py
|
calvin/actorstore/systemactors/std/ClassicDelay.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
class ClassicDelay(Actor):
"""
After first token, pass on token once every 'delay' seconds.
Input :
token: anything
Outputs:
token: anything
"""
@manage(['delay'])
def init(self, delay=0.1):
self.delay = delay
self.use('calvinsys.events.timer', shorthand='timer')
self.timer = None
def setup(self):
self.timer = self['timer'].repeat(self.delay)
def will_migrate(self):
self.timer.cancel()
def did_migrate(self):
self.setup()
@condition(['token'], ['token'])
@guard(lambda self, _: not self.timer)
def start_timer(self, token):
self.setup()
return ActionResult(production=(token, ))
@condition(['token'], ['token'])
@guard(lambda self, _: self.timer and self.timer.triggered)
def passthrough(self, token):
self.timer.ack()
return ActionResult(production=(token, ))
action_priority = (start_timer, passthrough)
requires = ['calvinsys.events.timer']
|
Add actor with behavior similar to old-style Delay
|
Add actor with behavior similar to old-style Delay
|
Python
|
apache-2.0
|
les69/calvin-base,EricssonResearch/calvin-base,les69/calvin-base,EricssonResearch/calvin-base,EricssonResearch/calvin-base,les69/calvin-base,EricssonResearch/calvin-base,les69/calvin-base
|
Add actor with behavior similar to old-style Delay
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
class ClassicDelay(Actor):
"""
After first token, pass on token once every 'delay' seconds.
Input :
token: anything
Outputs:
token: anything
"""
@manage(['delay'])
def init(self, delay=0.1):
self.delay = delay
self.use('calvinsys.events.timer', shorthand='timer')
self.timer = None
def setup(self):
self.timer = self['timer'].repeat(self.delay)
def will_migrate(self):
self.timer.cancel()
def did_migrate(self):
self.setup()
@condition(['token'], ['token'])
@guard(lambda self, _: not self.timer)
def start_timer(self, token):
self.setup()
return ActionResult(production=(token, ))
@condition(['token'], ['token'])
@guard(lambda self, _: self.timer and self.timer.triggered)
def passthrough(self, token):
self.timer.ack()
return ActionResult(production=(token, ))
action_priority = (start_timer, passthrough)
requires = ['calvinsys.events.timer']
|
<commit_before><commit_msg>Add actor with behavior similar to old-style Delay<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
class ClassicDelay(Actor):
"""
After first token, pass on token once every 'delay' seconds.
Input :
token: anything
Outputs:
token: anything
"""
@manage(['delay'])
def init(self, delay=0.1):
self.delay = delay
self.use('calvinsys.events.timer', shorthand='timer')
self.timer = None
def setup(self):
self.timer = self['timer'].repeat(self.delay)
def will_migrate(self):
self.timer.cancel()
def did_migrate(self):
self.setup()
@condition(['token'], ['token'])
@guard(lambda self, _: not self.timer)
def start_timer(self, token):
self.setup()
return ActionResult(production=(token, ))
@condition(['token'], ['token'])
@guard(lambda self, _: self.timer and self.timer.triggered)
def passthrough(self, token):
self.timer.ack()
return ActionResult(production=(token, ))
action_priority = (start_timer, passthrough)
requires = ['calvinsys.events.timer']
|
Add actor with behavior similar to old-style Delay# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
class ClassicDelay(Actor):
"""
After first token, pass on token once every 'delay' seconds.
Input :
token: anything
Outputs:
token: anything
"""
@manage(['delay'])
def init(self, delay=0.1):
self.delay = delay
self.use('calvinsys.events.timer', shorthand='timer')
self.timer = None
def setup(self):
self.timer = self['timer'].repeat(self.delay)
def will_migrate(self):
self.timer.cancel()
def did_migrate(self):
self.setup()
@condition(['token'], ['token'])
@guard(lambda self, _: not self.timer)
def start_timer(self, token):
self.setup()
return ActionResult(production=(token, ))
@condition(['token'], ['token'])
@guard(lambda self, _: self.timer and self.timer.triggered)
def passthrough(self, token):
self.timer.ack()
return ActionResult(production=(token, ))
action_priority = (start_timer, passthrough)
requires = ['calvinsys.events.timer']
|
<commit_before><commit_msg>Add actor with behavior similar to old-style Delay<commit_after># -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
class ClassicDelay(Actor):
"""
After first token, pass on token once every 'delay' seconds.
Input :
token: anything
Outputs:
token: anything
"""
@manage(['delay'])
def init(self, delay=0.1):
self.delay = delay
self.use('calvinsys.events.timer', shorthand='timer')
self.timer = None
def setup(self):
self.timer = self['timer'].repeat(self.delay)
def will_migrate(self):
self.timer.cancel()
def did_migrate(self):
self.setup()
@condition(['token'], ['token'])
@guard(lambda self, _: not self.timer)
def start_timer(self, token):
self.setup()
return ActionResult(production=(token, ))
@condition(['token'], ['token'])
@guard(lambda self, _: self.timer and self.timer.triggered)
def passthrough(self, token):
self.timer.ack()
return ActionResult(production=(token, ))
action_priority = (start_timer, passthrough)
requires = ['calvinsys.events.timer']
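A hypothetical CalvinScript wiring for the actor, shown as comments (actor and port names other than ClassicDelay's own are assumptions):

# src   : std.Counter()
# delay : std.ClassicDelay(delay=0.5)
# snk   : io.Print()
# src.integer > delay.token
# delay.token > snk.token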
|
|
e363aac46c9a5b607c7b32bcc5546c5a2728d750
|
climate_data/migrations/0029_auto_20170628_1527.py
|
climate_data/migrations/0029_auto_20170628_1527.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-28 15:27
from __future__ import unicode_literals
from django.db import migrations
from datetime import timedelta
# noinspection PyUnusedLocal
def add_message_id_to_reading(apps, schema_editor):
# noinspection PyPep8Naming
Reading = apps.get_model('climate_data', 'Reading')
# noinspection PyPep8Naming
Message = apps.get_model('climate_data', 'Message')
for reading in Reading.objects.filter(message_id=None):
reading.message = Message.objects.filter(
station=reading.station,
arrival_time__gt=reading.read_time,
arrival_time__lt=(reading.read_time + timedelta(minutes=52))
).first()
reading.save()
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0028_auto_20170627_1914'),
]
operations = [
migrations.RunPython(add_message_id_to_reading),
]
|
Add migration which fixes missing message IDs.
|
Add migration which fixes missing message IDs.
|
Python
|
apache-2.0
|
qubs/data-centre,qubs/data-centre,qubs/climate-data-api,qubs/climate-data-api
|
Add migration which fixes missing message IDs.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-28 15:27
from __future__ import unicode_literals
from django.db import migrations
from datetime import timedelta
# noinspection PyUnusedLocal
def add_message_id_to_reading(apps, schema_editor):
# noinspection PyPep8Naming
Reading = apps.get_model('climate_data', 'Reading')
# noinspection PyPep8Naming
Message = apps.get_model('climate_data', 'Message')
for reading in Reading.objects.filter(message_id=None):
reading.message = Message.objects.filter(
station=reading.station,
arrival_time__gt=reading.read_time,
arrival_time__lt=(reading.read_time + timedelta(minutes=52))
).first()
reading.save()
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0028_auto_20170627_1914'),
]
operations = [
migrations.RunPython(add_message_id_to_reading),
]
|
<commit_before><commit_msg>Add migration which fixes missing message IDs.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-28 15:27
from __future__ import unicode_literals
from django.db import migrations
from datetime import timedelta
# noinspection PyUnusedLocal
def add_message_id_to_reading(apps, schema_editor):
# noinspection PyPep8Naming
Reading = apps.get_model('climate_data', 'Reading')
# noinspection PyPep8Naming
Message = apps.get_model('climate_data', 'Message')
for reading in Reading.objects.filter(message_id=None):
reading.message = Message.objects.filter(
station=reading.station,
arrival_time__gt=reading.read_time,
arrival_time__lt=(reading.read_time + timedelta(minutes=52))
).first()
reading.save()
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0028_auto_20170627_1914'),
]
operations = [
migrations.RunPython(add_message_id_to_reading),
]
|
Add migration which fixes missing message IDs.# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-28 15:27
from __future__ import unicode_literals
from django.db import migrations
from datetime import timedelta
# noinspection PyUnusedLocal
def add_message_id_to_reading(apps, schema_editor):
# noinspection PyPep8Naming
Reading = apps.get_model('climate_data', 'Reading')
# noinspection PyPep8Naming
Message = apps.get_model('climate_data', 'Message')
for reading in Reading.objects.filter(message_id=None):
reading.message = Message.objects.filter(
station=reading.station,
arrival_time__gt=reading.read_time,
arrival_time__lt=(reading.read_time + timedelta(minutes=52))
).first()
reading.save()
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0028_auto_20170627_1914'),
]
operations = [
migrations.RunPython(add_message_id_to_reading),
]
|
<commit_before><commit_msg>Add migration which fixes missing message IDs.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-28 15:27
from __future__ import unicode_literals
from django.db import migrations
from datetime import timedelta
# noinspection PyUnusedLocal
def add_message_id_to_reading(apps, schema_editor):
# noinspection PyPep8Naming
Reading = apps.get_model('climate_data', 'Reading')
# noinspection PyPep8Naming
Message = apps.get_model('climate_data', 'Message')
for reading in Reading.objects.filter(message_id=None):
reading.message = Message.objects.filter(
station=reading.station,
arrival_time__gt=reading.read_time,
arrival_time__lt=(reading.read_time + timedelta(minutes=52))
).first()
reading.save()
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0028_auto_20170627_1914'),
]
operations = [
migrations.RunPython(add_message_id_to_reading),
]
|
|
840bc57e7120ae67e84c1c7bca94cfef34c8d2a8
|
scripts/add_missing_identifiers_to_preprints.py
|
scripts/add_missing_identifiers_to_preprints.py
|
import sys
import time
import logging
from scripts import utils as script_utils
from django.db import transaction
from website.app import setup_django
from website.identifiers.utils import request_identifiers_from_ezid, parse_identifiers
setup_django()
logger = logging.getLogger(__name__)
def add_identifiers_to_preprints(dry=True):
from osf.models import PreprintService
preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)
logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
for preprint in preprints_without_identifiers:
logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))
if not dry:
ezid_response = request_identifiers_from_ezid(preprint)
id_dict = parse_identifiers(ezid_response)
preprint.set_identifier_values(doi=id_dict['doi'], ark=id_dict['ark'])
preprint.save()
doi = preprint.get_identifier('doi')
assert preprint._id.upper() in doi.value
logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))
time.sleep(1)
else:
logger.info('Dry run - would have created identifier for preprint {} from service {}'.format(preprint._id, preprint.provider.name))
logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
def main(dry=True):
# Start a transaction that will be rolled back if any exceptions are un
add_identifiers_to_preprints(dry)
if dry:
# When running in dry mode force the transaction to rollback
raise Exception('Dry Run complete -- not actually saved')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
# Allow setting the log level just by appending the level to the command
if '--debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif '--warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif '--info' in sys.argv:
logger.setLevel(logging.INFO)
elif '--error' in sys.argv:
logger.setLevel(logging.ERROR)
# Finally run the migration
main(dry=dry)
|
Copy old script from @erinspace which added identifiers to existing preprints.
|
Copy old script from @erinspace which added identifiers to existing preprints.
|
Python
|
apache-2.0
|
erinspace/osf.io,chennan47/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,felliott/osf.io,baylee-d/osf.io,saradbowman/osf.io,leb2dg/osf.io,caseyrollins/osf.io,sloria/osf.io,Johnetordoff/osf.io,TomBaxter/osf.io,crcresearch/osf.io,mattclark/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,felliott/osf.io,mfraezz/osf.io,adlius/osf.io,mfraezz/osf.io,adlius/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,pattisdr/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,TomBaxter/osf.io,aaxelb/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,icereval/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,baylee-d/osf.io,laurenrevere/osf.io,chennan47/osf.io,pattisdr/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,chennan47/osf.io,cslzchen/osf.io,laurenrevere/osf.io,HalcyonChimera/osf.io,adlius/osf.io,binoculars/osf.io,cslzchen/osf.io,crcresearch/osf.io,leb2dg/osf.io,TomBaxter/osf.io,pattisdr/osf.io,mattclark/osf.io,binoculars/osf.io,erinspace/osf.io,icereval/osf.io,sloria/osf.io,cslzchen/osf.io,caseyrollins/osf.io,mattclark/osf.io,felliott/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,saradbowman/osf.io,erinspace/osf.io,leb2dg/osf.io,laurenrevere/osf.io,leb2dg/osf.io,HalcyonChimera/osf.io,adlius/osf.io,binoculars/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io
|
Copy old script from @erinspace which added identifiers to existing preprints.
|
import sys
import time
import logging
from scripts import utils as script_utils
from django.db import transaction
from website.app import setup_django
from website.identifiers.utils import request_identifiers_from_ezid, parse_identifiers
setup_django()
logger = logging.getLogger(__name__)
def add_identifiers_to_preprints(dry=True):
from osf.models import PreprintService
preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)
logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
for preprint in preprints_without_identifiers:
logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))
if not dry:
ezid_response = request_identifiers_from_ezid(preprint)
id_dict = parse_identifiers(ezid_response)
preprint.set_identifier_values(doi=id_dict['doi'], ark=id_dict['ark'])
preprint.save()
doi = preprint.get_identifier('doi')
assert preprint._id.upper() in doi.value
logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))
time.sleep(1)
else:
logger.info('Dry run - would have created identifier for preprint {} from service {}'.format(preprint._id, preprint.provider.name))
logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
def main(dry=True):
# Start a transaction that will be rolled back if any exceptions are un
add_identifiers_to_preprints(dry)
if dry:
# When running in dry mode force the transaction to rollback
raise Exception('Dry Run complete -- not actually saved')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
# Allow setting the log level just by appending the level to the command
if '--debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif '--warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif '--info' in sys.argv:
logger.setLevel(logging.INFO)
elif '--error' in sys.argv:
logger.setLevel(logging.ERROR)
# Finally run the migration
main(dry=dry)
|
<commit_before><commit_msg>Copy old script from @erinspace which added identifiers to existing preprints.<commit_after>
|
import sys
import time
import logging
from scripts import utils as script_utils
from django.db import transaction
from website.app import setup_django
from website.identifiers.utils import request_identifiers_from_ezid, parse_identifiers
setup_django()
logger = logging.getLogger(__name__)
def add_identifiers_to_preprints(dry=True):
from osf.models import PreprintService
preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)
logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
for preprint in preprints_without_identifiers:
logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))
if not dry:
ezid_response = request_identifiers_from_ezid(preprint)
id_dict = parse_identifiers(ezid_response)
preprint.set_identifier_values(doi=id_dict['doi'], ark=id_dict['ark'])
preprint.save()
doi = preprint.get_identifier('doi')
assert preprint._id.upper() in doi.value
logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))
time.sleep(1)
else:
logger.info('Dry run - would have created identifier for preprint {} from service {}'.format(preprint._id, preprint.provider.name))
logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
def main(dry=True):
# Start a transaction that will be rolled back if any exceptions are un
add_identifiers_to_preprints(dry)
if dry:
# When running in dry mode force the transaction to rollback
raise Exception('Dry Run complete -- not actually saved')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
# Allow setting the log level just by appending the level to the command
if '--debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif '--warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif '--info' in sys.argv:
logger.setLevel(logging.INFO)
elif '--error' in sys.argv:
logger.setLevel(logging.ERROR)
# Finally run the migration
main(dry=dry)
|
Copy old script from @erinspace which added identifiers to existing preprints.import sys
import time
import logging
from scripts import utils as script_utils
from django.db import transaction
from website.app import setup_django
from website.identifiers.utils import request_identifiers_from_ezid, parse_identifiers
setup_django()
logger = logging.getLogger(__name__)
def add_identifiers_to_preprints(dry=True):
from osf.models import PreprintService
preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)
logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
for preprint in preprints_without_identifiers:
logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))
if not dry:
ezid_response = request_identifiers_from_ezid(preprint)
id_dict = parse_identifiers(ezid_response)
preprint.set_identifier_values(doi=id_dict['doi'], ark=id_dict['ark'])
preprint.save()
doi = preprint.get_identifier('doi')
assert preprint._id.upper() in doi.value
logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))
time.sleep(1)
else:
logger.info('Dry run - would have created identifier for preprint {} from service {}'.format(preprint._id, preprint.provider.name))
logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
def main(dry=True):
# Start a transaction that will be rolled back if any exceptions are un
add_identifiers_to_preprints(dry)
if dry:
# When running in dry mode force the transaction to rollback
raise Exception('Dry Run complete -- not actually saved')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
# Allow setting the log level just by appending the level to the command
if '--debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif '--warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif '--info' in sys.argv:
logger.setLevel(logging.INFO)
elif '--error' in sys.argv:
logger.setLevel(logging.ERROR)
# Finally run the migration
main(dry=dry)
|
<commit_before><commit_msg>Copy old script from @erinspace which added identifiers to existing preprints.<commit_after>import sys
import time
import logging
from scripts import utils as script_utils
from django.db import transaction
from website.app import setup_django
from website.identifiers.utils import request_identifiers_from_ezid, parse_identifiers
setup_django()
logger = logging.getLogger(__name__)
def add_identifiers_to_preprints(dry=True):
from osf.models import PreprintService
preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)
logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
for preprint in preprints_without_identifiers:
logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))
if not dry:
ezid_response = request_identifiers_from_ezid(preprint)
id_dict = parse_identifiers(ezid_response)
preprint.set_identifier_values(doi=id_dict['doi'], ark=id_dict['ark'])
preprint.save()
doi = preprint.get_identifier('doi')
assert preprint._id.upper() in doi.value
logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))
time.sleep(1)
else:
logger.info('Dry run - would have created identifier for preprint {} from service {}'.format(preprint._id, preprint.provider.name))
logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
def main(dry=True):
# Start a transaction that will be rolled back if any exceptions are un
add_identifiers_to_preprints(dry)
if dry:
# When running in dry mode force the transaction to rollback
raise Exception('Dry Run complete -- not actually saved')
if __name__ == '__main__':
dry = '--dry' in sys.argv
if not dry:
# If we're not running in dry mode log everything to a file
script_utils.add_file_logger(logger, __file__)
# Allow setting the log level just by appending the level to the command
if '--debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif '--warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif '--info' in sys.argv:
logger.setLevel(logging.INFO)
elif '--error' in sys.argv:
logger.setLevel(logging.ERROR)
# Finally run the migration
main(dry=dry)
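An invocation sketch based on the flags the script reads from sys.argv (how the module is launched is an assumption):

#   python -m scripts.add_missing_identifiers_to_preprints --dry --debug   # dry run, verbose logging
#   python -m scripts.add_missing_identifiers_to_preprints                 # real run, logs to a file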
|
|
a02a46752d954c29a65bf8bc5b88fa3545315175
|
lib/svtplay_dl/tests/utils.py
|
lib/svtplay_dl/tests/utils.py
|
#!/usr/bin/python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
# The unittest framework doesn't play nice with pylint:
# pylint: disable-msg=C0103
from __future__ import absolute_import
import unittest
import svtplay_dl.utils
class timestrTest(unittest.TestCase):
def test_1(self):
self.assertEqual(svtplay_dl.utils.timestr(1), "00:00:00,00")
def test_100(self):
self.assertEqual(svtplay_dl.utils.timestr(100), "00:00:00,10")
def test_3600(self):
self.assertEqual(svtplay_dl.utils.timestr(3600), "00:00:03,60")
def test_3600000(self):
self.assertEqual(svtplay_dl.utils.timestr(3600000), "01:00:00,00")
|
Add unit tests for timestr()
|
Add unit tests for timestr()
|
Python
|
mit
|
OakNinja/svtplay-dl,qnorsten/svtplay-dl,dalgr/svtplay-dl,iwconfig/svtplay-dl,dalgr/svtplay-dl,leakim/svtplay-dl,selepo/svtplay-dl,spaam/svtplay-dl,olof/svtplay-dl,leakim/svtplay-dl,OakNinja/svtplay-dl,qnorsten/svtplay-dl,selepo/svtplay-dl,iwconfig/svtplay-dl,olof/svtplay-dl,spaam/svtplay-dl,OakNinja/svtplay-dl,leakim/svtplay-dl
|
Add unit tests for timestr()
|
#!/usr/bin/python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
# The unittest framework doesn't play nice with pylint:
# pylint: disable-msg=C0103
from __future__ import absolute_import
import unittest
import svtplay_dl.utils
class timestrTest(unittest.TestCase):
def test_1(self):
self.assertEqual(svtplay_dl.utils.timestr(1), "00:00:00,00")
def test_100(self):
self.assertEqual(svtplay_dl.utils.timestr(100), "00:00:00,10")
def test_3600(self):
self.assertEqual(svtplay_dl.utils.timestr(3600), "00:00:03,60")
def test_3600000(self):
self.assertEqual(svtplay_dl.utils.timestr(3600000), "01:00:00,00")
|
<commit_before><commit_msg>Add unit tests for timestr()<commit_after>
|
#!/usr/bin/python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
# The unittest framework doesn't play nice with pylint:
# pylint: disable-msg=C0103
from __future__ import absolute_import
import unittest
import svtplay_dl.utils
class timestrTest(unittest.TestCase):
def test_1(self):
self.assertEqual(svtplay_dl.utils.timestr(1), "00:00:00,00")
def test_100(self):
self.assertEqual(svtplay_dl.utils.timestr(100), "00:00:00,10")
def test_3600(self):
self.assertEqual(svtplay_dl.utils.timestr(3600), "00:00:03,60")
def test_3600000(self):
self.assertEqual(svtplay_dl.utils.timestr(3600000), "01:00:00,00")
|
Add unit tests for timestr()#!/usr/bin/python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
# The unittest framework doesn't play nice with pylint:
# pylint: disable-msg=C0103
from __future__ import absolute_import
import unittest
import svtplay_dl.utils
class timestrTest(unittest.TestCase):
def test_1(self):
self.assertEqual(svtplay_dl.utils.timestr(1), "00:00:00,00")
def test_100(self):
self.assertEqual(svtplay_dl.utils.timestr(100), "00:00:00,10")
def test_3600(self):
self.assertEqual(svtplay_dl.utils.timestr(3600), "00:00:03,60")
def test_3600000(self):
self.assertEqual(svtplay_dl.utils.timestr(3600000), "01:00:00,00")
|
<commit_before><commit_msg>Add unit tests for timestr()<commit_after>#!/usr/bin/python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
# The unittest framework doesn't play nice with pylint:
# pylint: disable-msg=C0103
from __future__ import absolute_import
import unittest
import svtplay_dl.utils
class timestrTest(unittest.TestCase):
def test_1(self):
self.assertEqual(svtplay_dl.utils.timestr(1), "00:00:00,00")
def test_100(self):
self.assertEqual(svtplay_dl.utils.timestr(100), "00:00:00,10")
def test_3600(self):
self.assertEqual(svtplay_dl.utils.timestr(3600), "00:00:03,60")
def test_3600000(self):
self.assertEqual(svtplay_dl.utils.timestr(3600000), "01:00:00,00")
|
|
a1039c2e38243b64d2027621aa87ee020636f23b
|
tests/test_views.py
|
tests/test_views.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import website
import unittest
import tempfile
class FPOTestCase(unittest.TestCase):
def test_homepage(self):
self.app = website.app.test_client()
resp = self.app.get('/')
self.assertEqual(resp.status_code, 200)
def test_admin(self):
self.app = website.app.test_client()
resp = self.app.get('/admin/')
self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
Add initial test for routes.
|
Add initial test for routes.
|
Python
|
mit
|
jonathanchu/fpo,jonathanchu/fpo
|
Add initial test for routes.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import website
import unittest
import tempfile
class FPOTestCase(unittest.TestCase):
def test_homepage(self):
self.app = website.app.test_client()
resp = self.app.get('/')
self.assertEqual(resp.status_code, 200)
def test_admin(self):
self.app = website.app.test_client()
resp = self.app.get('/admin/')
self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add initial test for routes.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import website
import unittest
import tempfile
class FPOTestCase(unittest.TestCase):
def test_homepage(self):
self.app = website.app.test_client()
resp = self.app.get('/')
self.assertEqual(resp.status_code, 200)
def test_admin(self):
self.app = website.app.test_client()
resp = self.app.get('/admin/')
self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
Add initial test for routes.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import website
import unittest
import tempfile
class FPOTestCase(unittest.TestCase):
def test_homepage(self):
self.app = website.app.test_client()
resp = self.app.get('/')
self.assertEqual(resp.status_code, 200)
def test_admin(self):
self.app = website.app.test_client()
resp = self.app.get('/admin/')
self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add initial test for routes.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import website
import unittest
import tempfile
class FPOTestCase(unittest.TestCase):
def test_homepage(self):
self.app = website.app.test_client()
resp = self.app.get('/')
self.assertEqual(resp.status_code, 200)
def test_admin(self):
self.app = website.app.test_client()
resp = self.app.get('/admin/')
self.assertEqual(resp.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
|
278920272efd7ab959d7cad5b5f7d6c17935c7e6
|
problem_35.py
|
problem_35.py
|
from math import sqrt
from time import time
PRIME_STATUS = {}
def is_prime(n):
if n == 2:
return True
if n % 2 == 0 or n <= 1:
return False
for i in range(3, int(sqrt(n))+1, 2):
if n % i == 0:
return False
return True
def check_prime_circles(num):
circles = []
s = str(num)
for i in range(len(s)):
circle = int(s[i:] + s[0:i])
circles.append(circle)
if circle not in PRIME_STATUS:
PRIME_STATUS[circle] = is_prime(circle)
if not PRIME_STATUS[circle]:
return False
return True
def main():
circular_primes = []
for num in range(2, 1000000):
if check_prime_circles(num):
circular_primes.append(num)
print 'Circular primes:', circular_primes
print 'Amount of circular primes:', len(circular_primes)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 35, circular primes
|
Add problem 35, circular primes
|
Python
|
mit
|
dimkarakostas/project-euler
|
Add problem 35, circular primes
|
from math import sqrt
from time import time
PRIME_STATUS = {}
def is_prime(n):
if n == 2:
return True
if n % 2 == 0 or n <= 1:
return False
for i in range(3, int(sqrt(n))+1, 2):
if n % i == 0:
return False
return True
def check_prime_circles(num):
circles = []
s = str(num)
for i in range(len(s)):
circle = int(s[i:] + s[0:i])
circles.append(circle)
if circle not in PRIME_STATUS:
PRIME_STATUS[circle] = is_prime(circle)
if not PRIME_STATUS[circle]:
return False
return True
def main():
circular_primes = []
for num in range(2, 1000000):
if check_prime_circles(num):
circular_primes.append(num)
print 'Circular primes:', circular_primes
print 'Amount of circular primes:', len(circular_primes)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 35, circular primes<commit_after>
|
from math import sqrt
from time import time
PRIME_STATUS = {}
def is_prime(n):
if n == 2:
return True
if n % 2 == 0 or n <= 1:
return False
for i in range(3, int(sqrt(n))+1, 2):
if n % i == 0:
return False
return True
def check_prime_circles(num):
circles = []
s = str(num)
for i in range(len(s)):
circle = int(s[i:] + s[0:i])
circles.append(circle)
if circle not in PRIME_STATUS:
PRIME_STATUS[circle] = is_prime(circle)
if not PRIME_STATUS[circle]:
return False
return True
def main():
circular_primes = []
for num in range(2, 1000000):
if check_prime_circles(num):
circular_primes.append(num)
print 'Circular primes:', circular_primes
print 'Amount of circular primes:', len(circular_primes)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 35, circular primesfrom math import sqrt
from time import time
PRIME_STATUS = {}
def is_prime(n):
if n == 2:
return True
if n % 2 == 0 or n <= 1:
return False
for i in range(3, int(sqrt(n))+1, 2):
if n % i == 0:
return False
return True
def check_prime_circles(num):
circles = []
s = str(num)
for i in range(len(s)):
circle = int(s[i:] + s[0:i])
circles.append(circle)
if circle not in PRIME_STATUS:
PRIME_STATUS[circle] = is_prime(circle)
if not PRIME_STATUS[circle]:
return False
return True
def main():
circular_primes = []
for num in range(2, 1000000):
if check_prime_circles(num):
circular_primes.append(num)
print 'Circular primes:', circular_primes
print 'Amount of circular primes:', len(circular_primes)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 35, circular primes<commit_after>from math import sqrt
from time import time
PRIME_STATUS = {}
def is_prime(n):
if n == 2:
return True
if n % 2 == 0 or n <= 1:
return False
for i in range(3, int(sqrt(n))+1, 2):
if n % i == 0:
return False
return True
def check_prime_circles(num):
circles = []
s = str(num)
for i in range(len(s)):
circle = int(s[i:] + s[0:i])
circles.append(circle)
if circle not in PRIME_STATUS:
PRIME_STATUS[circle] = is_prime(circle)
if not PRIME_STATUS[circle]:
return False
return True
def main():
circular_primes = []
for num in range(2, 1000000):
if check_prime_circles(num):
circular_primes.append(num)
print 'Circular primes:', circular_primes
print 'Amount of circular primes:', len(circular_primes)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
|
dad430fd56b8be22bd1a3b9773f9948c3e305883
|
stringlike/test/lazy_tests.py
|
stringlike/test/lazy_tests.py
|
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
from stringlike.lazy import LazyString, CachedLazyString
from unittest import main, TestCase
class TestLazyString(TestCase):
def test_equality(self):
self.assertEqual(LazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
lazyString = LazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 2)
class TestCachedLazyString(TestCase):
def test_equality(self):
self.assertEqual(CachedLazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
cachedLazyString = CachedLazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
if __name__ == '__main__':
main()
|
Add unit tests for lazy strings
|
Add unit tests for lazy strings
|
Python
|
mit
|
CovenantEyes/py_stringlike
|
Add unit tests for lazy strings
|
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
from stringlike.lazy import LazyString, CachedLazyString
from unittest import main, TestCase
class TestLazyString(TestCase):
def test_equality(self):
self.assertEqual(LazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
lazyString = LazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 2)
class TestCachedLazyString(TestCase):
def test_equality(self):
self.assertEqual(CachedLazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
cachedLazyString = CachedLazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add unit tests for lazy strings<commit_after>
|
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
from stringlike.lazy import LazyString, CachedLazyString
from unittest import main, TestCase
class TestLazyString(TestCase):
def test_equality(self):
self.assertEqual(LazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
lazyString = LazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 2)
class TestCachedLazyString(TestCase):
def test_equality(self):
self.assertEqual(CachedLazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
cachedLazyString = CachedLazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
if __name__ == '__main__':
main()
|
Add unit tests for lazy stringsimport sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
from stringlike.lazy import LazyString, CachedLazyString
from unittest import main, TestCase
class TestLazyString(TestCase):
def test_equality(self):
self.assertEqual(LazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
lazyString = LazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 2)
class TestCachedLazyString(TestCase):
def test_equality(self):
self.assertEqual(CachedLazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
cachedLazyString = CachedLazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add unit tests for lazy strings<commit_after>import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
from stringlike.lazy import LazyString, CachedLazyString
from unittest import main, TestCase
class TestLazyString(TestCase):
def test_equality(self):
self.assertEqual(LazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
lazyString = LazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(lazyString, 'abc')
self.assertEqual(self.evaluateCount, 2)
class TestCachedLazyString(TestCase):
def test_equality(self):
self.assertEqual(CachedLazyString(lambda: 'abc'), 'abc')
def test_delay(self):
self.evaluateCount = 0
def func():
self.evaluateCount += 1
return 'abc'
cachedLazyString = CachedLazyString(func)
self.assertEqual(self.evaluateCount, 0)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
self.assertEqual(cachedLazyString, 'abc')
self.assertEqual(self.evaluateCount, 1)
if __name__ == '__main__':
main()
|
|
e7b6aef4db85c777463d2335107145b60b678ae2
|
examples/tour_examples/maps_introjs_tour.py
|
examples/tour_examples/maps_introjs_tour.py
|
from seleniumbase import BaseCase
class MyTourClass(BaseCase):
def test_google_maps_tour(self):
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
self.create_tour(theme="introjs")
self.add_tour_step("Welcome to Google Maps!",
title="✅ SeleniumBase Tours 🌎")
self.add_tour_step("Type in a location here.", "#searchboxinput",
title="Search Box")
self.add_tour_step("Then click here to show it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to switch to Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.", "#widget-zoom-in",
alignment="left")
self.add_tour_step("Or click here to zoom out.", "#widget-zoom-out",
alignment="left")
self.add_tour_step("Use the Menu button to see more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours!",
title="🚃 End of Guided Tour 🚃")
self.export_tour(filename="google_maps_introjs_tour.js")
self.play_tour()
|
Create a new tour example
|
Create a new tour example
|
Python
|
mit
|
seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase
|
Create a new tour example
|
from seleniumbase import BaseCase
class MyTourClass(BaseCase):
def test_google_maps_tour(self):
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
self.create_tour(theme="introjs")
self.add_tour_step("Welcome to Google Maps!",
title="✅ SeleniumBase Tours 🌎")
self.add_tour_step("Type in a location here.", "#searchboxinput",
title="Search Box")
self.add_tour_step("Then click here to show it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to switch to Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.", "#widget-zoom-in",
alignment="left")
self.add_tour_step("Or click here to zoom out.", "#widget-zoom-out",
alignment="left")
self.add_tour_step("Use the Menu button to see more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours!",
title="🚃 End of Guided Tour 🚃")
self.export_tour(filename="google_maps_introjs_tour.js")
self.play_tour()
|
<commit_before><commit_msg>Create a new tour example<commit_after>
|
from seleniumbase import BaseCase
class MyTourClass(BaseCase):
def test_google_maps_tour(self):
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
self.create_tour(theme="introjs")
self.add_tour_step("Welcome to Google Maps!",
title="✅ SeleniumBase Tours 🌎")
self.add_tour_step("Type in a location here.", "#searchboxinput",
title="Search Box")
self.add_tour_step("Then click here to show it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to switch to Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.", "#widget-zoom-in",
alignment="left")
self.add_tour_step("Or click here to zoom out.", "#widget-zoom-out",
alignment="left")
self.add_tour_step("Use the Menu button to see more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours!",
title="🚃 End of Guided Tour 🚃")
self.export_tour(filename="google_maps_introjs_tour.js")
self.play_tour()
|
Create a new tour examplefrom seleniumbase import BaseCase
class MyTourClass(BaseCase):
def test_google_maps_tour(self):
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
self.create_tour(theme="introjs")
self.add_tour_step("Welcome to Google Maps!",
title="✅ SeleniumBase Tours 🌎")
self.add_tour_step("Type in a location here.", "#searchboxinput",
title="Search Box")
self.add_tour_step("Then click here to show it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to switch to Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.", "#widget-zoom-in",
alignment="left")
self.add_tour_step("Or click here to zoom out.", "#widget-zoom-out",
alignment="left")
self.add_tour_step("Use the Menu button to see more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours!",
title="🚃 End of Guided Tour 🚃")
self.export_tour(filename="google_maps_introjs_tour.js")
self.play_tour()
|
<commit_before><commit_msg>Create a new tour example<commit_after>from seleniumbase import BaseCase
class MyTourClass(BaseCase):
def test_google_maps_tour(self):
self.open("https://www.google.com/maps/@42.3598616,-71.0912631,15z")
self.wait_for_element("#searchboxinput")
self.wait_for_element("#minimap")
self.wait_for_element("#zoom")
self.create_tour(theme="introjs")
self.add_tour_step("Welcome to Google Maps!",
title="✅ SeleniumBase Tours 🌎")
self.add_tour_step("Type in a location here.", "#searchboxinput",
title="Search Box")
self.add_tour_step("Then click here to show it on the map.",
"#searchbox-searchbutton", alignment="bottom")
self.add_tour_step("Or click here to get driving directions.",
"#searchbox-directions", alignment="bottom")
self.add_tour_step("Use this button to switch to Satellite view.",
"#minimap div.widget-minimap", alignment="right")
self.add_tour_step("Click here to zoom in.", "#widget-zoom-in",
alignment="left")
self.add_tour_step("Or click here to zoom out.", "#widget-zoom-out",
alignment="left")
self.add_tour_step("Use the Menu button to see more options.",
".searchbox-hamburger-container", alignment="right")
self.add_tour_step("Or click here to see more Google apps.",
'[title="Google apps"]', alignment="left")
self.add_tour_step("Thanks for using SeleniumBase Tours!",
title="🚃 End of Guided Tour 🚃")
self.export_tour(filename="google_maps_introjs_tour.js")
self.play_tour()
|
|
d93916b1927f0ae099cee3cf93619d3113db147b
|
examples/anomaly_detection.py
|
examples/anomaly_detection.py
|
import math
from peewee import *
db = SqliteDatabase(':memory:')
class Reg(Model):
key = TextField()
value = IntegerField()
class Meta:
database = db
db.create_tables([Reg])
# Create a user-defined aggregate function suitable for computing the standard
# deviation of a series.
@db.aggregate('stddev')
class StdDev(object):
def __init__(self):
self.n = 0
self.values = []
def step(self, value):
self.n += 1
self.values.append(value)
def finalize(self):
if self.n < 2:
return 0
mean = sum(self.values) / self.n
sqsum = sum((i - mean) ** 2 for i in self.values)
return math.sqrt(sqsum / (self.n - 1))
values = [2, 3, 5, 2, 3, 12, 5, 3, 4, 1, 2, 1, -9, 3, 3, 5]
Reg.create_table()
Reg.insert_many([{'key': 'k%02d' % i, 'value': v}
for i, v in enumerate(values)]).execute()
# We'll calculate the mean and the standard deviation of the series in a common
# table expression, which will then be used by our query to find rows whose
# zscore exceeds a certain threshold.
cte = (Reg
.select(fn.avg(Reg.value), fn.stddev(Reg.value))
.cte('stats', columns=('series_mean', 'series_stddev')))
# The zscore is defined as the (value - mean) / stddev.
zscore = (Reg.value - cte.c.series_mean) / cte.c.series_stddev
# Find rows which fall outside of 2 standard deviations.
threshold = 2
query = (Reg
.select(Reg.key, Reg.value, zscore.alias('zscore'))
.from_(Reg, cte)
.where((zscore >= threshold) | (zscore <= -threshold))
.with_cte(cte))
for row in query:
print(row.key, row.value, round(row.zscore, 2))
db.close()
|
Add small example of basic anomaly detection w/peewee.
|
Add small example of basic anomaly detection w/peewee.
|
Python
|
mit
|
coleifer/peewee,coleifer/peewee,coleifer/peewee
|
Add small example of basic anomaly detection w/peewee.
|
import math
from peewee import *
db = SqliteDatabase(':memory:')
class Reg(Model):
key = TextField()
value = IntegerField()
class Meta:
database = db
db.create_tables([Reg])
# Create a user-defined aggregate function suitable for computing the standard
# deviation of a series.
@db.aggregate('stddev')
class StdDev(object):
def __init__(self):
self.n = 0
self.values = []
def step(self, value):
self.n += 1
self.values.append(value)
def finalize(self):
if self.n < 2:
return 0
mean = sum(self.values) / self.n
sqsum = sum((i - mean) ** 2 for i in self.values)
return math.sqrt(sqsum / (self.n - 1))
values = [2, 3, 5, 2, 3, 12, 5, 3, 4, 1, 2, 1, -9, 3, 3, 5]
Reg.create_table()
Reg.insert_many([{'key': 'k%02d' % i, 'value': v}
for i, v in enumerate(values)]).execute()
# We'll calculate the mean and the standard deviation of the series in a common
# table expression, which will then be used by our query to find rows whose
# zscore exceeds a certain threshold.
cte = (Reg
.select(fn.avg(Reg.value), fn.stddev(Reg.value))
.cte('stats', columns=('series_mean', 'series_stddev')))
# The zscore is defined as the (value - mean) / stddev.
zscore = (Reg.value - cte.c.series_mean) / cte.c.series_stddev
# Find rows which fall outside of 2 standard deviations.
threshold = 2
query = (Reg
.select(Reg.key, Reg.value, zscore.alias('zscore'))
.from_(Reg, cte)
.where((zscore >= threshold) | (zscore <= -threshold))
.with_cte(cte))
for row in query:
print(row.key, row.value, round(row.zscore, 2))
db.close()
|
<commit_before><commit_msg>Add small example of basic anomaly detection w/peewee.<commit_after>
|
import math
from peewee import *
db = SqliteDatabase(':memory:')
class Reg(Model):
key = TextField()
value = IntegerField()
class Meta:
database = db
db.create_tables([Reg])
# Create a user-defined aggregate function suitable for computing the standard
# deviation of a series.
@db.aggregate('stddev')
class StdDev(object):
def __init__(self):
self.n = 0
self.values = []
def step(self, value):
self.n += 1
self.values.append(value)
def finalize(self):
if self.n < 2:
return 0
mean = sum(self.values) / self.n
sqsum = sum((i - mean) ** 2 for i in self.values)
return math.sqrt(sqsum / (self.n - 1))
values = [2, 3, 5, 2, 3, 12, 5, 3, 4, 1, 2, 1, -9, 3, 3, 5]
Reg.create_table()
Reg.insert_many([{'key': 'k%02d' % i, 'value': v}
for i, v in enumerate(values)]).execute()
# We'll calculate the mean and the standard deviation of the series in a common
# table expression, which will then be used by our query to find rows whose
# zscore exceeds a certain threshold.
cte = (Reg
.select(fn.avg(Reg.value), fn.stddev(Reg.value))
.cte('stats', columns=('series_mean', 'series_stddev')))
# The zscore is defined as the (value - mean) / stddev.
zscore = (Reg.value - cte.c.series_mean) / cte.c.series_stddev
# Find rows which fall outside of 2 standard deviations.
threshold = 2
query = (Reg
.select(Reg.key, Reg.value, zscore.alias('zscore'))
.from_(Reg, cte)
.where((zscore >= threshold) | (zscore <= -threshold))
.with_cte(cte))
for row in query:
print(row.key, row.value, round(row.zscore, 2))
db.close()
|
Add small example of basic anomaly detection w/peewee.import math
from peewee import *
db = SqliteDatabase(':memory:')
class Reg(Model):
key = TextField()
value = IntegerField()
class Meta:
database = db
db.create_tables([Reg])
# Create a user-defined aggregate function suitable for computing the standard
# deviation of a series.
@db.aggregate('stddev')
class StdDev(object):
def __init__(self):
self.n = 0
self.values = []
def step(self, value):
self.n += 1
self.values.append(value)
def finalize(self):
if self.n < 2:
return 0
mean = sum(self.values) / self.n
sqsum = sum((i - mean) ** 2 for i in self.values)
return math.sqrt(sqsum / (self.n - 1))
values = [2, 3, 5, 2, 3, 12, 5, 3, 4, 1, 2, 1, -9, 3, 3, 5]
Reg.create_table()
Reg.insert_many([{'key': 'k%02d' % i, 'value': v}
for i, v in enumerate(values)]).execute()
# We'll calculate the mean and the standard deviation of the series in a common
# table expression, which will then be used by our query to find rows whose
# zscore exceeds a certain threshold.
cte = (Reg
.select(fn.avg(Reg.value), fn.stddev(Reg.value))
.cte('stats', columns=('series_mean', 'series_stddev')))
# The zscore is defined as the (value - mean) / stddev.
zscore = (Reg.value - cte.c.series_mean) / cte.c.series_stddev
# Find rows which fall outside of 2 standard deviations.
threshold = 2
query = (Reg
.select(Reg.key, Reg.value, zscore.alias('zscore'))
.from_(Reg, cte)
.where((zscore >= threshold) | (zscore <= -threshold))
.with_cte(cte))
for row in query:
print(row.key, row.value, round(row.zscore, 2))
db.close()
|
<commit_before><commit_msg>Add small example of basic anomaly detection w/peewee.<commit_after>import math
from peewee import *
db = SqliteDatabase(':memory:')
class Reg(Model):
key = TextField()
value = IntegerField()
class Meta:
database = db
db.create_tables([Reg])
# Create a user-defined aggregate function suitable for computing the standard
# deviation of a series.
@db.aggregate('stddev')
class StdDev(object):
def __init__(self):
self.n = 0
self.values = []
def step(self, value):
self.n += 1
self.values.append(value)
def finalize(self):
if self.n < 2:
return 0
mean = sum(self.values) / self.n
sqsum = sum((i - mean) ** 2 for i in self.values)
return math.sqrt(sqsum / (self.n - 1))
values = [2, 3, 5, 2, 3, 12, 5, 3, 4, 1, 2, 1, -9, 3, 3, 5]
Reg.create_table()
Reg.insert_many([{'key': 'k%02d' % i, 'value': v}
for i, v in enumerate(values)]).execute()
# We'll calculate the mean and the standard deviation of the series in a common
# table expression, which will then be used by our query to find rows whose
# zscore exceeds a certain threshold.
cte = (Reg
.select(fn.avg(Reg.value), fn.stddev(Reg.value))
.cte('stats', columns=('series_mean', 'series_stddev')))
# The zscore is defined as the (value - mean) / stddev.
zscore = (Reg.value - cte.c.series_mean) / cte.c.series_stddev
# Find rows which fall outside of 2 standard deviations.
threshold = 2
query = (Reg
.select(Reg.key, Reg.value, zscore.alias('zscore'))
.from_(Reg, cte)
.where((zscore >= threshold) | (zscore <= -threshold))
.with_cte(cte))
for row in query:
print(row.key, row.value, round(row.zscore, 2))
db.close()
|
|
10ccc510deab5c97ce8a6c5ee57232c5e399986e
|
decision_tree.py
|
decision_tree.py
|
import pandas as pd
from sklearn import tree
# X = [[0, 1], [1, 1]]
# Y = [0, 1]
#clf = tree.DecisionTreeClassifier()
#clf = clf.fit(X, Y)
data = pd.read_excel('/home/andre/sandbox/jhu-immuno/journal.pcbi.1003266.s001-2.XLS')
resp_cols = [ 'MHC' ]
data['y'] = data.Immunogenicity.map({'non-immunogenic': 0, 'immunogenic': 1 })
X = data[resp_cols]
Y = data.y
clf = tree.DecisionTreeClassifier()
dummy = pd.get_dummies(data.MHC)
clf.fit(dummy, Y)
from sklearn.externals.six import StringIO
f = tree.export_graphviz(clf, out_file = 'decision_tree')
|
Add decision tree classifier attempt.
|
Add decision tree classifier attempt.
|
Python
|
mit
|
andretadeu/jhu-immuno,andretadeu/jhu-immuno
|
Add decision tree classifier attempt.
|
import pandas as pd
from sklearn import tree
# X = [[0, 1], [1, 1]]
# Y = [0, 1]
#clf = tree.DecisionTreeClassifier()
#clf = clf.fit(X, Y)
data = pd.read_excel('/home/andre/sandbox/jhu-immuno/journal.pcbi.1003266.s001-2.XLS')
resp_cols = [ 'MHC' ]
data['y'] = data.Immunogenicity.map({'non-immunogenic': 0, 'immunogenic': 1 })
X = data[resp_cols]
Y = data.y
clf = tree.DecisionTreeClassifier()
dummy = pd.get_dummies(data.MHC)
clf.fit(dummy, Y)
from sklearn.externals.six import StringIO
f = tree.export_graphviz(clf, out_file = 'decision_tree')
|
<commit_before><commit_msg>Add decision tree classifier attempt.<commit_after>
|
import pandas as pd
from sklearn import tree
# X = [[0, 1], [1, 1]]
# Y = [0, 1]
#clf = tree.DecisionTreeClassifier()
#clf = clf.fit(X, Y)
data = pd.read_excel('/home/andre/sandbox/jhu-immuno/journal.pcbi.1003266.s001-2.XLS')
resp_cols = [ 'MHC' ]
data['y'] = data.Immunogenicity.map({'non-immunogenic': 0, 'immunogenic': 1 })
X = data[resp_cols]
Y = data.y
clf = tree.DecisionTreeClassifier()
dummy = pd.get_dummies(data.MHC)
clf.fit(dummy, Y)
from sklearn.externals.six import StringIO
f = tree.export_graphviz(clf, out_file = 'decision_tree')
|
Add decision tree classifier attempt.import pandas as pd
from sklearn import tree
# X = [[0, 1], [1, 1]]
# Y = [0, 1]
#clf = tree.DecisionTreeClassifier()
#clf = clf.fit(X, Y)
data = pd.read_excel('/home/andre/sandbox/jhu-immuno/journal.pcbi.1003266.s001-2.XLS')
resp_cols = [ 'MHC' ]
data['y'] = data.Immunogenicity.map({'non-immunogenic': 0, 'immunogenic': 1 })
X = data[resp_cols]
Y = data.y
clf = tree.DecisionTreeClassifier()
dummy = pd.get_dummies(data.MHC)
clf.fit(dummy, Y)
from sklearn.externals.six import StringIO
f = tree.export_graphviz(clf, out_file = 'decision_tree')
|
<commit_before><commit_msg>Add decision tree classifier attempt.<commit_after>import pandas as pd
from sklearn import tree
# X = [[0, 1], [1, 1]]
# Y = [0, 1]
#clf = tree.DecisionTreeClassifier()
#clf = clf.fit(X, Y)
data = pd.read_excel('/home/andre/sandbox/jhu-immuno/journal.pcbi.1003266.s001-2.XLS')
resp_cols = [ 'MHC' ]
data['y'] = data.Immunogenicity.map({'non-immunogenic': 0, 'immunogenic': 1 })
X = data[resp_cols]
Y = data.y
clf = tree.DecisionTreeClassifier()
dummy = pd.get_dummies(data.MHC)
clf.fit(dummy, Y)
from sklearn.externals.six import StringIO
f = tree.export_graphviz(clf, out_file = 'decision_tree')
|
|
8adfedd0c30fab796fccac6ec58c09e644a91b2f
|
shuffle_fastq.py
|
shuffle_fastq.py
|
# shuffles the sequences in a fastq file
import os
import random
from Bio import SeqIO
import fileinput
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--fq1", required="True")
parser.add_argument("--fq2", required="True")
args = parser.parse_args()
with open(args.fq1) as in_handle:
fq1 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
with open(args.fq2) as in_handle:
fq2 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
order = range(len(fq1))
random.shuffle(order)
fq1_name = os.path.splitext(args.fq1)[0]
fq2_name = os.path.splitext(args.fq2)[0]
with open(fq1_name + ".shuffled.fq", "wa") as fq1_handle, open(fq2_name + ".shuffled.fq", "wa") as fq2_handle:
for i in order:
fq1_handle.write(fq1[i].format("fastq-sanger"))
fq2_handle.write(fq2[i].format("fastq-sanger"))
|
Add script to shuffle paired fastq sequences.
|
Add script to shuffle paired fastq sequences.
|
Python
|
mit
|
roryk/junkdrawer,roryk/junkdrawer
|
Add script to shuffle paired fastq sequences.
|
# shuffles the sequences in a fastq file
import os
import random
from Bio import SeqIO
import fileinput
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--fq1", required="True")
parser.add_argument("--fq2", required="True")
args = parser.parse_args()
with open(args.fq1) as in_handle:
fq1 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
with open(args.fq2) as in_handle:
fq2 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
order = range(len(fq1))
random.shuffle(order)
fq1_name = os.path.splitext(args.fq1)[0]
fq2_name = os.path.splitext(args.fq2)[0]
with open(fq1_name + ".shuffled.fq", "wa") as fq1_handle, open(fq2_name + ".shuffled.fq", "wa") as fq2_handle:
for i in order:
fq1_handle.write(fq1[i].format("fastq-sanger"))
fq2_handle.write(fq2[i].format("fastq-sanger"))
|
<commit_before><commit_msg>Add script to shuffle paired fastq sequences.<commit_after>
|
# shuffles the sequences in a fastq file
import os
import random
from Bio import SeqIO
import fileinput
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--fq1", required="True")
parser.add_argument("--fq2", required="True")
args = parser.parse_args()
with open(args.fq1) as in_handle:
fq1 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
with open(args.fq2) as in_handle:
fq2 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
order = range(len(fq1))
random.shuffle(order)
fq1_name = os.path.splitext(args.fq1)[0]
fq2_name = os.path.splitext(args.fq2)[0]
with open(fq1_name + ".shuffled.fq", "wa") as fq1_handle, open(fq2_name + ".shuffled.fq", "wa") as fq2_handle:
for i in order:
fq1_handle.write(fq1[i].format("fastq-sanger"))
fq2_handle.write(fq2[i].format("fastq-sanger"))
|
Add script to shuffle paired fastq sequences.# shuffles the sequences in a fastq file
import os
import random
from Bio import SeqIO
import fileinput
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--fq1", required="True")
parser.add_argument("--fq2", required="True")
args = parser.parse_args()
with open(args.fq1) as in_handle:
fq1 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
with open(args.fq2) as in_handle:
fq2 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
order = range(len(fq1))
random.shuffle(order)
fq1_name = os.path.splitext(args.fq1)[0]
fq2_name = os.path.splitext(args.fq2)[0]
with open(fq1_name + ".shuffled.fq", "wa") as fq1_handle, open(fq2_name + ".shuffled.fq", "wa") as fq2_handle:
for i in order:
fq1_handle.write(fq1[i].format("fastq-sanger"))
fq2_handle.write(fq2[i].format("fastq-sanger"))
|
<commit_before><commit_msg>Add script to shuffle paired fastq sequences.<commit_after># shuffles the sequences in a fastq file
import os
import random
from Bio import SeqIO
import fileinput
from argparse import ArgumentParser
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--fq1", required="True")
parser.add_argument("--fq2", required="True")
args = parser.parse_args()
with open(args.fq1) as in_handle:
fq1 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
with open(args.fq2) as in_handle:
fq2 = [x for x in SeqIO.parse(in_handle, "fastq-sanger")]
order = range(len(fq1))
random.shuffle(order)
fq1_name = os.path.splitext(args.fq1)[0]
fq2_name = os.path.splitext(args.fq2)[0]
with open(fq1_name + ".shuffled.fq", "wa") as fq1_handle, open(fq2_name + ".shuffled.fq", "wa") as fq2_handle:
for i in order:
fq1_handle.write(fq1[i].format("fastq-sanger"))
fq2_handle.write(fq2[i].format("fastq-sanger"))
|
|
ffb5caf83055e734baf711366b6779ecb24a013c
|
addons/adobe/clone.py
|
addons/adobe/clone.py
|
#!/usr/bin/env python
from PIL import Image, ImageEnhance
import PIL.ImageOps
import fnmatch
import shutil
import os
def globPath(path, pattern):
result = []
for root, subdirs, files in os.walk(path):
for filename in files:
if fnmatch.fnmatch(filename, pattern):
result.append(os.path.join(root, filename))
return result
def inverse(inpng, outpng):
image = Image.open(inpng)
if image.mode == 'RGBA':
r, g, b, a = image.split()
rgb_image = Image.merge('RGB', (r, g, b))
inverted_image = PIL.ImageOps.invert(rgb_image)
r2, g2, b2 = inverted_image.split()
final_transparent_image = Image.merge('RGBA', (r2, g2, b2, a))
final_transparent_image.save(outpng)
else:
inverted_image = PIL.ImageOps.invert(image)
inverted_image.save(outpng)
def darken(inpng, outpng, darkness):
im1 = Image.open(inpng)
im2 = im1.point(lambda p: p * darkness)
im2.save(outpng)
def bright(inpng, outpng, brightness):
peak = Image.open(inpng)
enhancer = ImageEnhance.Brightness(peak)
bright = enhancer.enhance(brightness)
bright.save(outpng)
def makeClone(name, brightness):
outdir = os.path.join("..", name)
if not os.path.isdir(outdir):
os.makedirs(outdir)
for p in globPath('.', "**"):
outfile = os.path.join(outdir, p)
curdir = os.path.dirname(outfile)
if not os.path.isdir(curdir):
os.makedirs(curdir)
if p.endswith(".png"):
bright(p, outfile, brightness)
elif p.endswith(".tres"):
content = open(p).read()
content = content.replace("res://addons/adobe/", "res://addons/{}/".format(name))
of = open(outfile, 'w')
of.write(content)
of.close()
else:
shutil.copy(p, outfile)
makeClone("adobe_dark", 0.65)
makeClone("adobe_light", 1.35)
|
Add script to generate other adobe themes
|
Add script to generate other adobe themes
|
Python
|
mit
|
Geequlim/godot-themes
|
Add script to generate other adobe themes
|
#!/usr/bin/env python
from PIL import Image, ImageEnhance
import PIL.ImageOps
import fnmatch
import shutil
import os
def globPath(path, pattern):
result = []
for root, subdirs, files in os.walk(path):
for filename in files:
if fnmatch.fnmatch(filename, pattern):
result.append(os.path.join(root, filename))
return result
def inverse(inpng, outpng):
image = Image.open(inpng)
if image.mode == 'RGBA':
r, g, b, a = image.split()
rgb_image = Image.merge('RGB', (r, g, b))
inverted_image = PIL.ImageOps.invert(rgb_image)
r2, g2, b2 = inverted_image.split()
final_transparent_image = Image.merge('RGBA', (r2, g2, b2, a))
final_transparent_image.save(outpng)
else:
inverted_image = PIL.ImageOps.invert(image)
inverted_image.save(outpng)
def darken(inpng, outpng, darkness):
im1 = Image.open(inpng)
im2 = im1.point(lambda p: p * darkness)
im2.save(outpng)
def bright(inpng, outpng, brightness):
peak = Image.open(inpng)
enhancer = ImageEnhance.Brightness(peak)
bright = enhancer.enhance(brightness)
bright.save(outpng)
def makeClone(name, brightness):
outdir = os.path.join("..", name)
if not os.path.isdir(outdir):
os.makedirs(outdir)
for p in globPath('.', "**"):
outfile = os.path.join(outdir, p)
curdir = os.path.dirname(outfile)
if not os.path.isdir(curdir):
os.makedirs(curdir)
if p.endswith(".png"):
bright(p, outfile, brightness)
elif p.endswith(".tres"):
content = open(p).read()
content = content.replace("res://addons/adobe/", "res://addons/{}/".format(name))
of = open(outfile, 'w')
of.write(content)
of.close()
else:
shutil.copy(p, outfile)
makeClone("adobe_dark", 0.65)
makeClone("adobe_light", 1.35)
|
<commit_before><commit_msg>Add script to generate other adobe themes<commit_after>
|
#!/usr/bin/env python
from PIL import Image, ImageEnhance
import PIL.ImageOps
import fnmatch
import shutil
import os
def globPath(path, pattern):
result = []
for root, subdirs, files in os.walk(path):
for filename in files:
if fnmatch.fnmatch(filename, pattern):
result.append(os.path.join(root, filename))
return result
def inverse(inpng, outpng):
image = Image.open(inpng)
if image.mode == 'RGBA':
r, g, b, a = image.split()
rgb_image = Image.merge('RGB', (r, g, b))
inverted_image = PIL.ImageOps.invert(rgb_image)
r2, g2, b2 = inverted_image.split()
final_transparent_image = Image.merge('RGBA', (r2, g2, b2, a))
final_transparent_image.save(outpng)
else:
inverted_image = PIL.ImageOps.invert(image)
inverted_image.save(outpng)
def darken(inpng, outpng, darkness):
im1 = Image.open(inpng)
im2 = im1.point(lambda p: p * darkness)
im2.save(outpng)
def bright(inpng, outpng, brightness):
peak = Image.open(inpng)
enhancer = ImageEnhance.Brightness(peak)
bright = enhancer.enhance(brightness)
bright.save(outpng)
def makeClone(name, brightness):
outdir = os.path.join("..", name)
if not os.path.isdir(outdir):
os.makedirs(outdir)
for p in globPath('.', "**"):
outfile = os.path.join(outdir, p)
curdir = os.path.dirname(outfile)
if not os.path.isdir(curdir):
os.makedirs(curdir)
if p.endswith(".png"):
bright(p, outfile, brightness)
elif p.endswith(".tres"):
content = open(p).read()
content = content.replace("res://addons/adobe/", "res://addons/{}/".format(name))
of = open(outfile, 'w')
of.write(content)
of.close()
else:
shutil.copy(p, outfile)
makeClone("adobe_dark", 0.65)
makeClone("adobe_light", 1.35)
|
Add script to generate other adobe themes#!/usr/bin/env python
from PIL import Image, ImageEnhance
import PIL.ImageOps
import fnmatch
import shutil
import os
def globPath(path, pattern):
result = []
for root, subdirs, files in os.walk(path):
for filename in files:
if fnmatch.fnmatch(filename, pattern):
result.append(os.path.join(root, filename))
return result
def inverse(inpng, outpng):
image = Image.open(inpng)
if image.mode == 'RGBA':
r, g, b, a = image.split()
rgb_image = Image.merge('RGB', (r, g, b))
inverted_image = PIL.ImageOps.invert(rgb_image)
r2, g2, b2 = inverted_image.split()
final_transparent_image = Image.merge('RGBA', (r2, g2, b2, a))
final_transparent_image.save(outpng)
else:
inverted_image = PIL.ImageOps.invert(image)
inverted_image.save(outpng)
def darken(inpng, outpng, darkness):
im1 = Image.open(inpng)
im2 = im1.point(lambda p: p * darkness)
im2.save(outpng)
def bright(inpng, outpng, brightness):
peak = Image.open(inpng)
enhancer = ImageEnhance.Brightness(peak)
bright = enhancer.enhance(brightness)
bright.save(outpng)
def makeClone(name, brightness):
outdir = os.path.join("..", name)
if not os.path.isdir(outdir):
os.makedirs(outdir)
for p in globPath('.', "**"):
outfile = os.path.join(outdir, p)
curdir = os.path.dirname(outfile)
if not os.path.isdir(curdir):
os.makedirs(curdir)
if p.endswith(".png"):
bright(p, outfile, brightness)
elif p.endswith(".tres"):
content = open(p).read()
content = content.replace("res://addons/adobe/", "res://addons/{}/".format(name))
of = open(outfile, 'w')
of.write(content)
of.close()
else:
shutil.copy(p, outfile)
makeClone("adobe_dark", 0.65)
makeClone("adobe_light", 1.35)
|
<commit_before><commit_msg>Add script to generate other adobe themes<commit_after>#!/usr/bin/env python
from PIL import Image, ImageEnhance
import PIL.ImageOps
import fnmatch
import shutil
import os
def globPath(path, pattern):
result = []
for root, subdirs, files in os.walk(path):
for filename in files:
if fnmatch.fnmatch(filename, pattern):
result.append(os.path.join(root, filename))
return result
def inverse(inpng, outpng):
image = Image.open(inpng)
if image.mode == 'RGBA':
r, g, b, a = image.split()
rgb_image = Image.merge('RGB', (r, g, b))
inverted_image = PIL.ImageOps.invert(rgb_image)
r2, g2, b2 = inverted_image.split()
final_transparent_image = Image.merge('RGBA', (r2, g2, b2, a))
final_transparent_image.save(outpng)
else:
inverted_image = PIL.ImageOps.invert(image)
inverted_image.save(outpng)
def darken(inpng, outpng, darkness):
im1 = Image.open(inpng)
im2 = im1.point(lambda p: p * darkness)
im2.save(outpng)
def bright(inpng, outpng, brightness):
peak = Image.open(inpng)
enhancer = ImageEnhance.Brightness(peak)
bright = enhancer.enhance(brightness)
bright.save(outpng)
def makeClone(name, brightness):
outdir = os.path.join("..", name)
if not os.path.isdir(outdir):
os.makedirs(outdir)
for p in globPath('.', "**"):
outfile = os.path.join(outdir, p)
curdir = os.path.dirname(outfile)
if not os.path.isdir(curdir):
os.makedirs(curdir)
if p.endswith(".png"):
bright(p, outfile, brightness)
elif p.endswith(".tres"):
content = open(p).read()
content = content.replace("res://addons/adobe/", "res://addons/{}/".format(name))
of = open(outfile, 'w')
of.write(content)
of.close()
else:
shutil.copy(p, outfile)
makeClone("adobe_dark", 0.65)
makeClone("adobe_light", 1.35)
|
|
c5ecaef62d788b69446181c6ba495cb273bf98ef
|
altair/examples/scatter_with_rolling_mean.py
|
altair/examples/scatter_with_rolling_mean.py
|
"""
Scatter Plot with Rolling Mean
------------------------------
A scatter plot with a rolling mean overlay. In this example a 30 day window
is used to calculate the mean of the maximum temperature around each date.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
line = alt.Chart(source).mark_line(
color='red',
size=3
).transform_window(
rolling_mean='mean(temp_max)',
frame=[-15, 15]
).encode(
x='date:T',
y='rolling_mean:Q'
)
points = alt.Chart(source).mark_point().encode(
x='date:T',
y=alt.Y('temp_max:Q',
axis=alt.Axis(title='Max Temp'))
)
points + line
|
Add rolling mean scatter plot example
|
Add rolling mean scatter plot example
|
Python
|
bsd-3-clause
|
altair-viz/altair,jakevdp/altair
|
Add rolling mean scatter plot example
|
"""
Scatter Plot with Rolling Mean
------------------------------
A scatter plot with a rolling mean overlay. In this example a 30 day window
is used to calculate the mean of the maximum temperature around each date.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
line = alt.Chart(source).mark_line(
color='red',
size=3
).transform_window(
rolling_mean='mean(temp_max)',
frame=[-15, 15]
).encode(
x='date:T',
y='rolling_mean:Q'
)
points = alt.Chart(source).mark_point().encode(
x='date:T',
y=alt.Y('temp_max:Q',
axis=alt.Axis(title='Max Temp'))
)
points + line
|
<commit_before><commit_msg>Add rolling mean scatter plot example<commit_after>
|
"""
Scatter Plot with Rolling Mean
------------------------------
A scatter plot with a rolling mean overlay. In this example a 30 day window
is used to calculate the mean of the maximum temperature around each date.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
line = alt.Chart(source).mark_line(
color='red',
size=3
).transform_window(
rolling_mean='mean(temp_max)',
frame=[-15, 15]
).encode(
x='date:T',
y='rolling_mean:Q'
)
points = alt.Chart(source).mark_point().encode(
x='date:T',
y=alt.Y('temp_max:Q',
axis=alt.Axis(title='Max Temp'))
)
points + line
|
Add rolling mean scatter plot example"""
Scatter Plot with Rolling Mean
------------------------------
A scatter plot with a rolling mean overlay. In this example a 30 day window
is used to calculate the mean of the maximum temperature around each date.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
line = alt.Chart(source).mark_line(
color='red',
size=3
).transform_window(
rolling_mean='mean(temp_max)',
frame=[-15, 15]
).encode(
x='date:T',
y='rolling_mean:Q'
)
points = alt.Chart(source).mark_point().encode(
x='date:T',
y=alt.Y('temp_max:Q',
axis=alt.Axis(title='Max Temp'))
)
points + line
|
<commit_before><commit_msg>Add rolling mean scatter plot example<commit_after>"""
Scatter Plot with Rolling Mean
------------------------------
A scatter plot with a rolling mean overlay. In this example a 30 day window
is used to calculate the mean of the maximum temperature around each date.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
line = alt.Chart(source).mark_line(
color='red',
size=3
).transform_window(
rolling_mean='mean(temp_max)',
frame=[-15, 15]
).encode(
x='date:T',
y='rolling_mean:Q'
)
points = alt.Chart(source).mark_point().encode(
x='date:T',
y=alt.Y('temp_max:Q',
axis=alt.Axis(title='Max Temp'))
)
points + line
|
|
b3f91806b525ddef50d541f937bed539f9bae20a
|
mezzanine/project_template/deploy/live_settings.py
|
mezzanine/project_template/deploy/live_settings.py
|
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
|
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
|
Use cache backend for sessions in deployed settings.
|
Use cache backend for sessions in deployed settings.
|
Python
|
bsd-2-clause
|
Kniyl/mezzanine,webounty/mezzanine,spookylukey/mezzanine,theclanks/mezzanine,batpad/mezzanine,sjdines/mezzanine,dovydas/mezzanine,readevalprint/mezzanine,eino-makitalo/mezzanine,industrydive/mezzanine,joshcartme/mezzanine,Cajoline/mezzanine,frankier/mezzanine,PegasusWang/mezzanine,biomassives/mezzanine,Skytorn86/mezzanine,adrian-the-git/mezzanine,agepoly/mezzanine,saintbird/mezzanine,damnfine/mezzanine,stbarnabas/mezzanine,dsanders11/mezzanine,biomassives/mezzanine,gradel/mezzanine,joshcartme/mezzanine,vladir/mezzanine,geodesign/mezzanine,molokov/mezzanine,geodesign/mezzanine,geodesign/mezzanine,sjuxax/mezzanine,orlenko/sfpirg,SoLoHiC/mezzanine,orlenko/sfpirg,wyzex/mezzanine,vladir/mezzanine,wyzex/mezzanine,douglaskastle/mezzanine,Cicero-Zhao/mezzanine,nikolas/mezzanine,theclanks/mezzanine,scarcry/snm-mezzanine,wyzex/mezzanine,frankchin/mezzanine,dekomote/mezzanine-modeltranslation-backport,dekomote/mezzanine-modeltranslation-backport,readevalprint/mezzanine,dsanders11/mezzanine,gbosh/mezzanine,saintbird/mezzanine,damnfine/mezzanine,molokov/mezzanine,scarcry/snm-mezzanine,SoLoHiC/mezzanine,christianwgd/mezzanine,sjuxax/mezzanine,stephenmcd/mezzanine,ZeroXn/mezzanine,vladir/mezzanine,batpad/mezzanine,nikolas/mezzanine,Kniyl/mezzanine,wrwrwr/mezzanine,biomassives/mezzanine,promil23/mezzanine,dekomote/mezzanine-modeltranslation-backport,Skytorn86/mezzanine,jerivas/mezzanine,cccs-web/mezzanine,AlexHill/mezzanine,Cajoline/mezzanine,mush42/mezzanine,fusionbox/mezzanine,agepoly/mezzanine,orlenko/sfpirg,dsanders11/mezzanine,wbtuomela/mezzanine,guibernardino/mezzanine,wbtuomela/mezzanine,viaregio/mezzanine,orlenko/plei,emile2016/mezzanine,dustinrb/mezzanine,webounty/mezzanine,douglaskastle/mezzanine,orlenko/plei,promil23/mezzanine,gradel/mezzanine,frankier/mezzanine,emile2016/mezzanine,Skytorn86/mezzanine,mush42/mezzanine,cccs-web/mezzanine,SoLoHiC/mezzanine,damnfine/mezzanine,douglaskastle/mezzanine,nikolas/mezzanine,PegasusWang/mezzanine,industrydive/mezzanine,spookylukey/mezzanine,Cicero-Zhao/mezzanine,PegasusWang/mezzanine,adrian-the-git/mezzanine,viaregio/mezzanine,fusionbox/mezzanine,eino-makitalo/mezzanine,jerivas/mezzanine,ryneeverett/mezzanine,dovydas/mezzanine,gbosh/mezzanine,emile2016/mezzanine,frankchin/mezzanine,dovydas/mezzanine,saintbird/mezzanine,ZeroXn/mezzanine,webounty/mezzanine,ryneeverett/mezzanine,jerivas/mezzanine,agepoly/mezzanine,stephenmcd/mezzanine,readevalprint/mezzanine,wrwrwr/mezzanine,gradel/mezzanine,theclanks/mezzanine,joshcartme/mezzanine,dustinrb/mezzanine,frankchin/mezzanine,Kniyl/mezzanine,tuxinhang1989/mezzanine,christianwgd/mezzanine,molokov/mezzanine,ryneeverett/mezzanine,stbarnabas/mezzanine,tuxinhang1989/mezzanine,sjdines/mezzanine,ZeroXn/mezzanine,viaregio/mezzanine,jjz/mezzanine,jjz/mezzanine,guibernardino/mezzanine,Cajoline/mezzanine,industrydive/mezzanine,sjuxax/mezzanine,tuxinhang1989/mezzanine,eino-makitalo/mezzanine,orlenko/plei,jjz/mezzanine,sjdines/mezzanine,gbosh/mezzanine,mush42/mezzanine,dustinrb/mezzanine,scarcry/snm-mezzanine,christianwgd/mezzanine,adrian-the-git/mezzanine,stephenmcd/mezzanine,promil23/mezzanine,spookylukey/mezzanine,wbtuomela/mezzanine,frankier/mezzanine,AlexHill/mezzanine
|
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
Use cache backend for sessions in deployed settings.
|
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
|
<commit_before>
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
<commit_msg>Use cache backend for sessions in deployed settings.<commit_after>
|
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
|
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
Use cache backend for sessions in deployed settings.
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
|
<commit_before>
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
<commit_msg>Use cache backend for sessions in deployed settings.<commit_after>
DATABASES = {
"default": {
# Ends with "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "%(proj_name)s",
# Not used with sqlite3.
"USER": "%(proj_name)s",
# Not used with sqlite3.
"PASSWORD": "%(db_pass)s",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "127.0.0.1",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
CACHE_MIDDLEWARE_SECONDS = 60
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": "127.0.0.1:11211",
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
|
62545500553443863d61d9e5ecc80307c745a227
|
migrate/20110917T143029-remove-value-dimensions.py
|
migrate/20110917T143029-remove-value-dimensions.py
|
import logging
from openspending.lib import cubes
from openspending import migration, model, mongo
log = logging.getLogger(__name__)
def up():
group_args = ({'dataset':1}, {}, {'num': 0},
'function (x, acc) { acc.num += 1 }')
before = mongo.db.dimension.group(*group_args)
dims = model.dimension.find({'type': {'$nin': ['entity', 'classifier']}})
for d in dims:
log.info("Removing dimension: %s", d)
model.dimension.remove({'_id': d['_id']})
after = mongo.db.dimension.group(*group_args)
for bf, af in zip(before, after):
if int(bf['num']) != int(af['num']):
log.warn("Number of dimensions for dimension '%s' "
"changed. Recomputing cubes.", bf['dataset'])
ds = model.dataset.find_one({'name': bf['dataset']})
cubes.Cube.update_all_cubes(ds)
def down():
raise migration.IrreversibleMigrationError("Can't add back dimension "
"fields that we dropped!")
|
Add migration to remove non-{entity,classifier} dimensions from the database, and to recompute cubes if necessary
|
Add migration to remove non-{entity,classifier} dimensions from the database, and to recompute cubes if necessary
|
Python
|
agpl-3.0
|
CivicVision/datahub,openspending/spendb,johnjohndoe/spendb,USStateDept/FPA_Core,spendb/spendb,johnjohndoe/spendb,openspending/spendb,nathanhilbert/FPA_Core,pudo/spendb,openspending/spendb,nathanhilbert/FPA_Core,spendb/spendb,CivicVision/datahub,pudo/spendb,USStateDept/FPA_Core,pudo/spendb,nathanhilbert/FPA_Core,USStateDept/FPA_Core,CivicVision/datahub,johnjohndoe/spendb,spendb/spendb
|
Add migration to remove non-{entity,classifier} dimensions from the database, and to recompute cubes if necessary
|
import logging
from openspending.lib import cubes
from openspending import migration, model, mongo
log = logging.getLogger(__name__)
def up():
group_args = ({'dataset':1}, {}, {'num': 0},
'function (x, acc) { acc.num += 1 }')
before = mongo.db.dimension.group(*group_args)
dims = model.dimension.find({'type': {'$nin': ['entity', 'classifier']}})
for d in dims:
log.info("Removing dimension: %s", d)
model.dimension.remove({'_id': d['_id']})
after = mongo.db.dimension.group(*group_args)
for bf, af in zip(before, after):
if int(bf['num']) != int(af['num']):
log.warn("Number of dimensions for dimension '%s' "
"changed. Recomputing cubes.", bf['dataset'])
ds = model.dataset.find_one({'name': bf['dataset']})
cubes.Cube.update_all_cubes(ds)
def down():
raise migration.IrreversibleMigrationError("Can't add back dimension "
"fields that we dropped!")
|
<commit_before><commit_msg>Add migration to remove non-{entity,classifier} dimensions from the database, and to recompute cubes if necessary<commit_after>
|
import logging
from openspending.lib import cubes
from openspending import migration, model, mongo
log = logging.getLogger(__name__)
def up():
group_args = ({'dataset':1}, {}, {'num': 0},
'function (x, acc) { acc.num += 1 }')
before = mongo.db.dimension.group(*group_args)
dims = model.dimension.find({'type': {'$nin': ['entity', 'classifier']}})
for d in dims:
log.info("Removing dimension: %s", d)
model.dimension.remove({'_id': d['_id']})
after = mongo.db.dimension.group(*group_args)
for bf, af in zip(before, after):
if int(bf['num']) != int(af['num']):
log.warn("Number of dimensions for dimension '%s' "
"changed. Recomputing cubes.", bf['dataset'])
ds = model.dataset.find_one({'name': bf['dataset']})
cubes.Cube.update_all_cubes(ds)
def down():
raise migration.IrreversibleMigrationError("Can't add back dimension "
"fields that we dropped!")
|
Add migration to remove non-{entity,classifier} dimensions from the database, and to recompute cubes if necessaryimport logging
from openspending.lib import cubes
from openspending import migration, model, mongo
log = logging.getLogger(__name__)
def up():
group_args = ({'dataset':1}, {}, {'num': 0},
'function (x, acc) { acc.num += 1 }')
before = mongo.db.dimension.group(*group_args)
dims = model.dimension.find({'type': {'$nin': ['entity', 'classifier']}})
for d in dims:
log.info("Removing dimension: %s", d)
model.dimension.remove({'_id': d['_id']})
after = mongo.db.dimension.group(*group_args)
for bf, af in zip(before, after):
if int(bf['num']) != int(af['num']):
log.warn("Number of dimensions for dimension '%s' "
"changed. Recomputing cubes.", bf['dataset'])
ds = model.dataset.find_one({'name': bf['dataset']})
cubes.Cube.update_all_cubes(ds)
def down():
raise migration.IrreversibleMigrationError("Can't add back dimension "
"fields that we dropped!")
|
<commit_before><commit_msg>Add migration to remove non-{entity,classifier} dimensions from the database, and to recompute cubes if necessary<commit_after>import logging
from openspending.lib import cubes
from openspending import migration, model, mongo
log = logging.getLogger(__name__)
def up():
group_args = ({'dataset':1}, {}, {'num': 0},
'function (x, acc) { acc.num += 1 }')
before = mongo.db.dimension.group(*group_args)
dims = model.dimension.find({'type': {'$nin': ['entity', 'classifier']}})
for d in dims:
log.info("Removing dimension: %s", d)
model.dimension.remove({'_id': d['_id']})
after = mongo.db.dimension.group(*group_args)
for bf, af in zip(before, after):
if int(bf['num']) != int(af['num']):
log.warn("Number of dimensions for dimension '%s' "
"changed. Recomputing cubes.", bf['dataset'])
ds = model.dataset.find_one({'name': bf['dataset']})
cubes.Cube.update_all_cubes(ds)
def down():
raise migration.IrreversibleMigrationError("Can't add back dimension "
"fields that we dropped!")
|
|
c599b5d470cf80b964af1b261a11540516e120df
|
galpy/potential_src/DehnenSmoothWrapperPotential.py
|
galpy/potential_src/DehnenSmoothWrapperPotential.py
|
###############################################################################
# DehnenSmoothWrapperPotential.py: Wrapper to smoothly grow a potential
###############################################################################
from galpy.potential_src.WrapperPotential import SimpleWrapperPotential
class DehnenSmoothWrapperPotential(SimpleWrapperPotential):
def __init__(self,amp=1.,pot=None,tform=-4.,tsteady=None,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a DehnenSmoothWrapper Potential
INPUT:
amp - amplitude to be applied to the potential (default: 1.)
pot - Potential instance or list thereof; the amplitude of this will be grown by this wrapper
tform - start of growth
tsteady - time from tform at which the potential is fully grown (default: -tform/2, st the perturbation is fully grown at tform/2)
OUTPUT:
(none)
HISTORY:
2017-06-26 - Started - Bovy (UofT)
"""
SimpleWrapperPotential.__init__(self,amp=amp,pot=pot,ro=ro,vo=vo)
self._tform= tform
if tsteady is None:
self._tsteady= self._tform/2.
else:
self._tsteady= self._tform+tsteady
self.hasC= False
self.hasC_dxdv= False
def _smooth(self,t):
#Calculate relevant time
if t < self._tform:
smooth= 0.
elif t < self._tsteady:
deltat= t-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else: #bar is fully on
smooth= 1.
return smooth
def _wrap(self,attribute,R,Z,phi=0.,t=0.):
return self._smooth(t)\
*self._wrap_pot_func(attribute)(self._pot,R,Z,phi=phi,t=t)
|
Add Dehnen smoothing as a wrapper
|
Add Dehnen smoothing as a wrapper
|
Python
|
bsd-3-clause
|
jobovy/galpy,jobovy/galpy,jobovy/galpy,jobovy/galpy
|
Add Dehnen smoothing as a wrapper
|
###############################################################################
# DehnenSmoothWrapperPotential.py: Wrapper to smoothly grow a potential
###############################################################################
from galpy.potential_src.WrapperPotential import SimpleWrapperPotential
class DehnenSmoothWrapperPotential(SimpleWrapperPotential):
def __init__(self,amp=1.,pot=None,tform=-4.,tsteady=None,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a DehnenSmoothWrapper Potential
INPUT:
amp - amplitude to be applied to the potential (default: 1.)
pot - Potential instance or list thereof; the amplitude of this will be grown by this wrapper
tform - start of growth
tsteady - time from tform at which the potential is fully grown (default: -tform/2, st the perturbation is fully grown at tform/2)
OUTPUT:
(none)
HISTORY:
2017-06-26 - Started - Bovy (UofT)
"""
SimpleWrapperPotential.__init__(self,amp=amp,pot=pot,ro=ro,vo=vo)
self._tform= tform
if tsteady is None:
self._tsteady= self._tform/2.
else:
self._tsteady= self._tform+tsteady
self.hasC= False
self.hasC_dxdv= False
def _smooth(self,t):
#Calculate relevant time
if t < self._tform:
smooth= 0.
elif t < self._tsteady:
deltat= t-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else: #bar is fully on
smooth= 1.
return smooth
def _wrap(self,attribute,R,Z,phi=0.,t=0.):
return self._smooth(t)\
*self._wrap_pot_func(attribute)(self._pot,R,Z,phi=phi,t=t)
|
<commit_before><commit_msg>Add Dehnen smoothing as a wrapper<commit_after>
|
###############################################################################
# DehnenSmoothWrapperPotential.py: Wrapper to smoothly grow a potential
###############################################################################
from galpy.potential_src.WrapperPotential import SimpleWrapperPotential
class DehnenSmoothWrapperPotential(SimpleWrapperPotential):
def __init__(self,amp=1.,pot=None,tform=-4.,tsteady=None,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a DehnenSmoothWrapper Potential
INPUT:
amp - amplitude to be applied to the potential (default: 1.)
pot - Potential instance or list thereof; the amplitude of this will be grown by this wrapper
tform - start of growth
tsteady - time from tform at which the potential is fully grown (default: -tform/2, st the perturbation is fully grown at tform/2)
OUTPUT:
(none)
HISTORY:
2017-06-26 - Started - Bovy (UofT)
"""
SimpleWrapperPotential.__init__(self,amp=amp,pot=pot,ro=ro,vo=vo)
self._tform= tform
if tsteady is None:
self._tsteady= self._tform/2.
else:
self._tsteady= self._tform+tsteady
self.hasC= False
self.hasC_dxdv= False
def _smooth(self,t):
#Calculate relevant time
if t < self._tform:
smooth= 0.
elif t < self._tsteady:
deltat= t-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else: #bar is fully on
smooth= 1.
return smooth
def _wrap(self,attribute,R,Z,phi=0.,t=0.):
return self._smooth(t)\
*self._wrap_pot_func(attribute)(self._pot,R,Z,phi=phi,t=t)
|
Add Dehnen smoothing as a wrapper###############################################################################
# DehnenSmoothWrapperPotential.py: Wrapper to smoothly grow a potential
###############################################################################
from galpy.potential_src.WrapperPotential import SimpleWrapperPotential
class DehnenSmoothWrapperPotential(SimpleWrapperPotential):
def __init__(self,amp=1.,pot=None,tform=-4.,tsteady=None,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a DehnenSmoothWrapper Potential
INPUT:
amp - amplitude to be applied to the potential (default: 1.)
pot - Potential instance or list thereof; the amplitude of this will be grown by this wrapper
tform - start of growth
tsteady - time from tform at which the potential is fully grown (default: -tform/2, st the perturbation is fully grown at tform/2)
OUTPUT:
(none)
HISTORY:
2017-06-26 - Started - Bovy (UofT)
"""
SimpleWrapperPotential.__init__(self,amp=amp,pot=pot,ro=ro,vo=vo)
self._tform= tform
if tsteady is None:
self._tsteady= self._tform/2.
else:
self._tsteady= self._tform+tsteady
self.hasC= False
self.hasC_dxdv= False
def _smooth(self,t):
#Calculate relevant time
if t < self._tform:
smooth= 0.
elif t < self._tsteady:
deltat= t-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else: #bar is fully on
smooth= 1.
return smooth
def _wrap(self,attribute,R,Z,phi=0.,t=0.):
return self._smooth(t)\
*self._wrap_pot_func(attribute)(self._pot,R,Z,phi=phi,t=t)
|
<commit_before><commit_msg>Add Dehnen smoothing as a wrapper<commit_after>###############################################################################
# DehnenSmoothWrapperPotential.py: Wrapper to smoothly grow a potential
###############################################################################
from galpy.potential_src.WrapperPotential import SimpleWrapperPotential
class DehnenSmoothWrapperPotential(SimpleWrapperPotential):
def __init__(self,amp=1.,pot=None,tform=-4.,tsteady=None,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a DehnenSmoothWrapper Potential
INPUT:
amp - amplitude to be applied to the potential (default: 1.)
pot - Potential instance or list thereof; the amplitude of this will be grown by this wrapper
tform - start of growth
tsteady - time from tform at which the potential is fully grown (default: -tform/2, st the perturbation is fully grown at tform/2)
OUTPUT:
(none)
HISTORY:
2017-06-26 - Started - Bovy (UofT)
"""
SimpleWrapperPotential.__init__(self,amp=amp,pot=pot,ro=ro,vo=vo)
self._tform= tform
if tsteady is None:
self._tsteady= self._tform/2.
else:
self._tsteady= self._tform+tsteady
self.hasC= False
self.hasC_dxdv= False
def _smooth(self,t):
#Calculate relevant time
if t < self._tform:
smooth= 0.
elif t < self._tsteady:
deltat= t-self._tform
xi= 2.*deltat/(self._tsteady-self._tform)-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
else: #bar is fully on
smooth= 1.
return smooth
def _wrap(self,attribute,R,Z,phi=0.,t=0.):
return self._smooth(t)\
*self._wrap_pot_func(attribute)(self._pot,R,Z,phi=phi,t=t)
|
|
bb7bb2e12d3ccbb55f0b0e6db5d0cb79c3ea8079
|
km_api/know_me/migrations/0013_remove_profileitem_media_resource.py
|
km_api/know_me/migrations/0013_remove_profileitem_media_resource.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-01 14:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('know_me', '0012_emergencyitem'),
]
operations = [
migrations.RemoveField(
model_name='profileitem',
name='media_resource',
),
]
|
Add missing migration for profile items.
|
Add missing migration for profile items.
When we moved profile item image content into a separate model, we
missed this migration when rebasing.
|
Python
|
apache-2.0
|
knowmetools/km-api,knowmetools/km-api,knowmetools/km-api,knowmetools/km-api
|
Add missing migration for profile items.
When we moved profile item image content into a separate model, we
missed this migration when rebasing.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-01 14:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('know_me', '0012_emergencyitem'),
]
operations = [
migrations.RemoveField(
model_name='profileitem',
name='media_resource',
),
]
|
<commit_before><commit_msg>Add missing migration for profile items.
When we moved profile item image content into a separate model, we
missed this migration when rebasing.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-01 14:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('know_me', '0012_emergencyitem'),
]
operations = [
migrations.RemoveField(
model_name='profileitem',
name='media_resource',
),
]
|
Add missing migration for profile items.
When we moved profile item image content into a separate model, we
missed this migration when rebasing.# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-01 14:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('know_me', '0012_emergencyitem'),
]
operations = [
migrations.RemoveField(
model_name='profileitem',
name='media_resource',
),
]
|
<commit_before><commit_msg>Add missing migration for profile items.
When we moved profile item image content into a separate model, we
missed this migration when rebasing.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-01 14:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('know_me', '0012_emergencyitem'),
]
operations = [
migrations.RemoveField(
model_name='profileitem',
name='media_resource',
),
]
|
|
26fcbefee171f8d56504a7eba121027f0c5be8b5
|
lms/djangoapps/grades/migrations/0013_persistentsubsectiongradeoverride.py
|
lms/djangoapps/grades/migrations/0013_persistentsubsectiongradeoverride.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grades', '0012_computegradessetting'),
]
operations = [
migrations.CreateModel(
name='PersistentSubsectionGradeOverride',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('modified', models.DateTimeField(auto_now=True, db_index=True)),
('earned_all_override', models.FloatField(null=True, blank=True)),
('possible_all_override', models.FloatField(null=True, blank=True)),
('earned_graded_override', models.FloatField(null=True, blank=True)),
('possible_graded_override', models.FloatField(null=True, blank=True)),
('grade', models.OneToOneField(related_name='override', to='grades.PersistentSubsectionGrade')),
],
),
]
|
Add migration for new overrides table
|
Add migration for new overrides table
|
Python
|
agpl-3.0
|
Lektorium-LLC/edx-platform,CredoReference/edx-platform,arbrandes/edx-platform,msegado/edx-platform,pabloborrego93/edx-platform,edx-solutions/edx-platform,TeachAtTUM/edx-platform,gsehub/edx-platform,TeachAtTUM/edx-platform,proversity-org/edx-platform,Lektorium-LLC/edx-platform,stvstnfrd/edx-platform,proversity-org/edx-platform,gymnasium/edx-platform,lduarte1991/edx-platform,msegado/edx-platform,angelapper/edx-platform,eduNEXT/edx-platform,ahmedaljazzar/edx-platform,arbrandes/edx-platform,mitocw/edx-platform,lduarte1991/edx-platform,Stanford-Online/edx-platform,jolyonb/edx-platform,eduNEXT/edx-platform,philanthropy-u/edx-platform,procangroup/edx-platform,pabloborrego93/edx-platform,Edraak/edraak-platform,Edraak/edraak-platform,EDUlib/edx-platform,a-parhom/edx-platform,appsembler/edx-platform,arbrandes/edx-platform,teltek/edx-platform,appsembler/edx-platform,teltek/edx-platform,philanthropy-u/edx-platform,gsehub/edx-platform,eduNEXT/edx-platform,eduNEXT/edunext-platform,gymnasium/edx-platform,ahmedaljazzar/edx-platform,teltek/edx-platform,edx/edx-platform,TeachAtTUM/edx-platform,edx/edx-platform,cpennington/edx-platform,TeachAtTUM/edx-platform,edx-solutions/edx-platform,ahmedaljazzar/edx-platform,eduNEXT/edx-platform,edx/edx-platform,ESOedX/edx-platform,gymnasium/edx-platform,ESOedX/edx-platform,gsehub/edx-platform,jolyonb/edx-platform,eduNEXT/edunext-platform,BehavioralInsightsTeam/edx-platform,teltek/edx-platform,cpennington/edx-platform,edx-solutions/edx-platform,ESOedX/edx-platform,msegado/edx-platform,ahmedaljazzar/edx-platform,eduNEXT/edunext-platform,hastexo/edx-platform,philanthropy-u/edx-platform,Stanford-Online/edx-platform,hastexo/edx-platform,hastexo/edx-platform,pabloborrego93/edx-platform,angelapper/edx-platform,mitocw/edx-platform,hastexo/edx-platform,lduarte1991/edx-platform,cpennington/edx-platform,BehavioralInsightsTeam/edx-platform,Lektorium-LLC/edx-platform,jolyonb/edx-platform,Edraak/edraak-platform,lduarte1991/edx-platform,BehavioralInsightsTeam/edx-platform,philanthropy-u/edx-platform,pabloborrego93/edx-platform,a-parhom/edx-platform,Stanford-Online/edx-platform,ESOedX/edx-platform,a-parhom/edx-platform,mitocw/edx-platform,stvstnfrd/edx-platform,jolyonb/edx-platform,appsembler/edx-platform,cpennington/edx-platform,eduNEXT/edunext-platform,EDUlib/edx-platform,kmoocdev2/edx-platform,a-parhom/edx-platform,mitocw/edx-platform,Edraak/edraak-platform,msegado/edx-platform,appsembler/edx-platform,angelapper/edx-platform,msegado/edx-platform,gymnasium/edx-platform,CredoReference/edx-platform,proversity-org/edx-platform,edx-solutions/edx-platform,stvstnfrd/edx-platform,proversity-org/edx-platform,edx/edx-platform,procangroup/edx-platform,Lektorium-LLC/edx-platform,stvstnfrd/edx-platform,angelapper/edx-platform,arbrandes/edx-platform,EDUlib/edx-platform,BehavioralInsightsTeam/edx-platform,kmoocdev2/edx-platform,kmoocdev2/edx-platform,EDUlib/edx-platform,CredoReference/edx-platform,CredoReference/edx-platform,procangroup/edx-platform,procangroup/edx-platform,Stanford-Online/edx-platform,kmoocdev2/edx-platform,gsehub/edx-platform,kmoocdev2/edx-platform
|
Add migration for new overrides table
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grades', '0012_computegradessetting'),
]
operations = [
migrations.CreateModel(
name='PersistentSubsectionGradeOverride',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('modified', models.DateTimeField(auto_now=True, db_index=True)),
('earned_all_override', models.FloatField(null=True, blank=True)),
('possible_all_override', models.FloatField(null=True, blank=True)),
('earned_graded_override', models.FloatField(null=True, blank=True)),
('possible_graded_override', models.FloatField(null=True, blank=True)),
('grade', models.OneToOneField(related_name='override', to='grades.PersistentSubsectionGrade')),
],
),
]
|
<commit_before><commit_msg>Add migration for new overrides table<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grades', '0012_computegradessetting'),
]
operations = [
migrations.CreateModel(
name='PersistentSubsectionGradeOverride',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('modified', models.DateTimeField(auto_now=True, db_index=True)),
('earned_all_override', models.FloatField(null=True, blank=True)),
('possible_all_override', models.FloatField(null=True, blank=True)),
('earned_graded_override', models.FloatField(null=True, blank=True)),
('possible_graded_override', models.FloatField(null=True, blank=True)),
('grade', models.OneToOneField(related_name='override', to='grades.PersistentSubsectionGrade')),
],
),
]
|
Add migration for new overrides table# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grades', '0012_computegradessetting'),
]
operations = [
migrations.CreateModel(
name='PersistentSubsectionGradeOverride',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('modified', models.DateTimeField(auto_now=True, db_index=True)),
('earned_all_override', models.FloatField(null=True, blank=True)),
('possible_all_override', models.FloatField(null=True, blank=True)),
('earned_graded_override', models.FloatField(null=True, blank=True)),
('possible_graded_override', models.FloatField(null=True, blank=True)),
('grade', models.OneToOneField(related_name='override', to='grades.PersistentSubsectionGrade')),
],
),
]
|
<commit_before><commit_msg>Add migration for new overrides table<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grades', '0012_computegradessetting'),
]
operations = [
migrations.CreateModel(
name='PersistentSubsectionGradeOverride',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True, db_index=True)),
('modified', models.DateTimeField(auto_now=True, db_index=True)),
('earned_all_override', models.FloatField(null=True, blank=True)),
('possible_all_override', models.FloatField(null=True, blank=True)),
('earned_graded_override', models.FloatField(null=True, blank=True)),
('possible_graded_override', models.FloatField(null=True, blank=True)),
('grade', models.OneToOneField(related_name='override', to='grades.PersistentSubsectionGrade')),
],
),
]
|
|
aff827e9cc02bcee6cf8687e1dff65f39daaf6c6
|
workshops/test/test_landing_page.py
|
workshops/test/test_landing_page.py
|
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch
from datetime import date
class FakeDate(date):
"A fake replacement for date that can be mocked for testing."
pass
@classmethod
def today(cls):
return cls(2013, 12, 7)
@patch('workshops.models.datetime.date', FakeDate)
class TestLandingPage(TestCase):
"Tests for the workshop landing page"
fixtures = ['event_test']
def test_has_upcoming_events(self):
"""Test that the landing page is passed some
upcoming_events in the context.
"""
response = self.client.get(reverse('index'))
# This will fail if the context variable doesn't exist
upcoming_events = response.context['upcoming_events']
# There are 2 upcoming events
assert len(upcoming_events) == 2
# They should all start with upcoming
assert all([e.slug[:8] == 'upcoming' for e in upcoming_events])
|
Add a failing test to the landing page to check for upcoming events.
|
Add a failing test to the landing page to check for upcoming events.
|
Python
|
mit
|
shapiromatron/amy,pbanaszkiewicz/amy,swcarpentry/amy,vahtras/amy,pbanaszkiewicz/amy,pbanaszkiewicz/amy,wking/swc-amy,vahtras/amy,shapiromatron/amy,swcarpentry/amy,swcarpentry/amy,wking/swc-amy,wking/swc-amy,shapiromatron/amy,vahtras/amy,wking/swc-amy
|
Add a failing test to the landing page to check for upcoming events.
|
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch
from datetime import date
class FakeDate(date):
"A fake replacement for date that can be mocked for testing."
pass
@classmethod
def today(cls):
return cls(2013, 12, 7)
@patch('workshops.models.datetime.date', FakeDate)
class TestLandingPage(TestCase):
"Tests for the workshop landing page"
fixtures = ['event_test']
def test_has_upcoming_events(self):
"""Test that the landing page is passed some
upcoming_events in the context.
"""
response = self.client.get(reverse('index'))
# This will fail if the context variable doesn't exist
upcoming_events = response.context['upcoming_events']
# There are 2 upcoming events
assert len(upcoming_events) == 2
# They should all start with upcoming
assert all([e.slug[:8] == 'upcoming' for e in upcoming_events])
|
<commit_before><commit_msg>Add a failing test to the landing page to check for upcoming events.<commit_after>
|
from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch
from datetime import date
class FakeDate(date):
"A fake replacement for date that can be mocked for testing."
pass
@classmethod
def today(cls):
return cls(2013, 12, 7)
@patch('workshops.models.datetime.date', FakeDate)
class TestLandingPage(TestCase):
"Tests for the workshop landing page"
fixtures = ['event_test']
def test_has_upcoming_events(self):
"""Test that the landing page is passed some
upcoming_events in the context.
"""
response = self.client.get(reverse('index'))
# This will fail if the context variable doesn't exist
upcoming_events = response.context['upcoming_events']
# There are 2 upcoming events
assert len(upcoming_events) == 2
# They should all start with upcoming
assert all([e.slug[:8] == 'upcoming' for e in upcoming_events])
|
Add a failing test to the landing page to check for upcoming events.from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch
from datetime import date
class FakeDate(date):
"A fake replacement for date that can be mocked for testing."
pass
@classmethod
def today(cls):
return cls(2013, 12, 7)
@patch('workshops.models.datetime.date', FakeDate)
class TestLandingPage(TestCase):
"Tests for the workshop landing page"
fixtures = ['event_test']
def test_has_upcoming_events(self):
"""Test that the landing page is passed some
upcoming_events in the context.
"""
response = self.client.get(reverse('index'))
# This will fail if the context variable doesn't exist
upcoming_events = response.context['upcoming_events']
# There are 2 upcoming events
assert len(upcoming_events) == 2
# They should all start with upcoming
assert all([e.slug[:8] == 'upcoming' for e in upcoming_events])
|
<commit_before><commit_msg>Add a failing test to the landing page to check for upcoming events.<commit_after>from django.core.urlresolvers import reverse
from django.test import TestCase
from mock import patch
from datetime import date
class FakeDate(date):
"A fake replacement for date that can be mocked for testing."
pass
@classmethod
def today(cls):
return cls(2013, 12, 7)
@patch('workshops.models.datetime.date', FakeDate)
class TestLandingPage(TestCase):
"Tests for the workshop landing page"
fixtures = ['event_test']
def test_has_upcoming_events(self):
"""Test that the landing page is passed some
upcoming_events in the context.
"""
response = self.client.get(reverse('index'))
# This will fail if the context variable doesn't exist
upcoming_events = response.context['upcoming_events']
# There are 2 upcoming events
assert len(upcoming_events) == 2
# They should all start with upcoming
assert all([e.slug[:8] == 'upcoming' for e in upcoming_events])
|
|
ea26478495d5aec6925e32c9a87245bf2e1e4bc8
|
rps/errors.py
|
rps/errors.py
|
gestures = ["rock", "paper", "scissors"]
def verify_move(player_move):
if player_move not in gestures:
raise Exception("Wrong input!")
return player_move
# let's catch an exception
try:
player_move = verify_move(input("[rock,paper,scissors]: "))
print("The move was correct.")
except Exception:
print("The move was incorrect and Exception was raised.")
|
Add script demonstrating raising and catching Exceptions.
|
Add script demonstrating raising and catching Exceptions.
|
Python
|
mit
|
kubkon/ee106-additional-material
|
Add script demonstrating raising and catching Exceptions.
|
gestures = ["rock", "paper", "scissors"]
def verify_move(player_move):
if player_move not in gestures:
raise Exception("Wrong input!")
return player_move
# let's catch an exception
try:
player_move = verify_move(input("[rock,paper,scissors]: "))
print("The move was correct.")
except Exception:
print("The move was incorrect and Exception was raised.")
|
<commit_before><commit_msg>Add script demonstrating raising and catching Exceptions.<commit_after>
|
gestures = ["rock", "paper", "scissors"]
def verify_move(player_move):
if player_move not in gestures:
raise Exception("Wrong input!")
return player_move
# let's catch an exception
try:
player_move = verify_move(input("[rock,paper,scissors]: "))
print("The move was correct.")
except Exception:
print("The move was incorrect and Exception was raised.")
|
Add script demonstrating raising and catching Exceptions.gestures = ["rock", "paper", "scissors"]
def verify_move(player_move):
if player_move not in gestures:
raise Exception("Wrong input!")
return player_move
# let's catch an exception
try:
player_move = verify_move(input("[rock,paper,scissors]: "))
print("The move was correct.")
except Exception:
print("The move was incorrect and Exception was raised.")
|
<commit_before><commit_msg>Add script demonstrating raising and catching Exceptions.<commit_after>gestures = ["rock", "paper", "scissors"]
def verify_move(player_move):
if player_move not in gestures:
raise Exception("Wrong input!")
return player_move
# let's catch an exception
try:
player_move = verify_move(input("[rock,paper,scissors]: "))
print("The move was correct.")
except Exception:
print("The move was incorrect and Exception was raised.")
|
|
947c9ef100686fa1ec0acaa10bc49bf6c785665b
|
ffflash/container.py
|
ffflash/container.py
|
from os import path
from ffflash import RELEASE, log, now, timeout
from ffflash.lib.clock import epoch_repr
from ffflash.lib.data import merge_dicts
from ffflash.lib.files import read_json_file, write_json_file
class Container:
def __init__(self, spec, filename):
self._spec = spec
self._location = path.abspath(filename)
self.data = read_json_file(self._location, fallback={})
self._info()
def _info(self, info={}):
self.data['_info'] = self.data.get('_info', {})
self.data['_info']['generator'] = RELEASE
self.data['_info']['access'] = self.data['_info'].get('access', {})
if not self.data['_info']['access'].get('first', False):
self.data['_info']['access']['first'] = now
self.data['_info']['access']['last'] = now
self.data['_info']['access']['overall'] = epoch_repr(
abs(now - self.data['_info']['access']['first']),
ms=True
)
self.data['_info']['access']['timeout'] = timeout
if info:
self.data['_info'] = merge_dicts(self.data['_info'], info)
def save(self, info={}):
self._info(info)
if write_json_file(self._location, self.data):
log.info('{} saved {}'.format(self._spec, self._location))
|
Use unified class for json output
|
Use unified class for json output
|
Python
|
bsd-3-clause
|
spookey/ffflash,spookey/ffflash
|
Use unified class for json output
|
from os import path
from ffflash import RELEASE, log, now, timeout
from ffflash.lib.clock import epoch_repr
from ffflash.lib.data import merge_dicts
from ffflash.lib.files import read_json_file, write_json_file
class Container:
def __init__(self, spec, filename):
self._spec = spec
self._location = path.abspath(filename)
self.data = read_json_file(self._location, fallback={})
self._info()
def _info(self, info={}):
self.data['_info'] = self.data.get('_info', {})
self.data['_info']['generator'] = RELEASE
self.data['_info']['access'] = self.data['_info'].get('access', {})
if not self.data['_info']['access'].get('first', False):
self.data['_info']['access']['first'] = now
self.data['_info']['access']['last'] = now
self.data['_info']['access']['overall'] = epoch_repr(
abs(now - self.data['_info']['access']['first']),
ms=True
)
self.data['_info']['access']['timeout'] = timeout
if info:
self.data['_info'] = merge_dicts(self.data['_info'], info)
def save(self, info={}):
self._info(info)
if write_json_file(self._location, self.data):
log.info('{} saved {}'.format(self._spec, self._location))
|
<commit_before><commit_msg>Use unified class for json output<commit_after>
|
from os import path
from ffflash import RELEASE, log, now, timeout
from ffflash.lib.clock import epoch_repr
from ffflash.lib.data import merge_dicts
from ffflash.lib.files import read_json_file, write_json_file
class Container:
def __init__(self, spec, filename):
self._spec = spec
self._location = path.abspath(filename)
self.data = read_json_file(self._location, fallback={})
self._info()
def _info(self, info={}):
self.data['_info'] = self.data.get('_info', {})
self.data['_info']['generator'] = RELEASE
self.data['_info']['access'] = self.data['_info'].get('access', {})
if not self.data['_info']['access'].get('first', False):
self.data['_info']['access']['first'] = now
self.data['_info']['access']['last'] = now
self.data['_info']['access']['overall'] = epoch_repr(
abs(now - self.data['_info']['access']['first']),
ms=True
)
self.data['_info']['access']['timeout'] = timeout
if info:
self.data['_info'] = merge_dicts(self.data['_info'], info)
def save(self, info={}):
self._info(info)
if write_json_file(self._location, self.data):
log.info('{} saved {}'.format(self._spec, self._location))
|
Use unified class for json outputfrom os import path
from ffflash import RELEASE, log, now, timeout
from ffflash.lib.clock import epoch_repr
from ffflash.lib.data import merge_dicts
from ffflash.lib.files import read_json_file, write_json_file
class Container:
def __init__(self, spec, filename):
self._spec = spec
self._location = path.abspath(filename)
self.data = read_json_file(self._location, fallback={})
self._info()
def _info(self, info={}):
self.data['_info'] = self.data.get('_info', {})
self.data['_info']['generator'] = RELEASE
self.data['_info']['access'] = self.data['_info'].get('access', {})
if not self.data['_info']['access'].get('first', False):
self.data['_info']['access']['first'] = now
self.data['_info']['access']['last'] = now
self.data['_info']['access']['overall'] = epoch_repr(
abs(now - self.data['_info']['access']['first']),
ms=True
)
self.data['_info']['access']['timeout'] = timeout
if info:
self.data['_info'] = merge_dicts(self.data['_info'], info)
def save(self, info={}):
self._info(info)
if write_json_file(self._location, self.data):
log.info('{} saved {}'.format(self._spec, self._location))
|
<commit_before><commit_msg>Use unified class for json output<commit_after>from os import path
from ffflash import RELEASE, log, now, timeout
from ffflash.lib.clock import epoch_repr
from ffflash.lib.data import merge_dicts
from ffflash.lib.files import read_json_file, write_json_file
class Container:
def __init__(self, spec, filename):
self._spec = spec
self._location = path.abspath(filename)
self.data = read_json_file(self._location, fallback={})
self._info()
def _info(self, info={}):
self.data['_info'] = self.data.get('_info', {})
self.data['_info']['generator'] = RELEASE
self.data['_info']['access'] = self.data['_info'].get('access', {})
if not self.data['_info']['access'].get('first', False):
self.data['_info']['access']['first'] = now
self.data['_info']['access']['last'] = now
self.data['_info']['access']['overall'] = epoch_repr(
abs(now - self.data['_info']['access']['first']),
ms=True
)
self.data['_info']['access']['timeout'] = timeout
if info:
self.data['_info'] = merge_dicts(self.data['_info'], info)
def save(self, info={}):
self._info(info)
if write_json_file(self._location, self.data):
log.info('{} saved {}'.format(self._spec, self._location))
|
|
c206969facfc0e46d7ec4d3f60ce2e6a07956dbd
|
14B-088/HI/analysis/run_filfinder.py
|
14B-088/HI/analysis/run_filfinder.py
|
from fil_finder import fil_finder_2D
from basics import BubbleFinder2D
from spectral_cube.lower_dimensional_structures import Projection
from astropy.io import fits
from radio_beam import Beam
from astropy.wcs import WCS
import astropy.units as u
import matplotlib.pyplot as p
'''
Filaments in M33? Why not?
'''
mom0_fits = fits.open("/home/eric/MyRAID/M33/14B-088/HI/full_imaging/M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.mom0.fits")[0]
mom0 = Projection(mom0_fits.data, wcs=WCS(mom0_fits.header))
mom0.meta['beam'] = Beam.from_fits_header(mom0_fits.header)
# Create the bubble mask instead of letting FilFinder do it.
bub = BubbleFinder2D(mom0, sigma=80.)
fils = fil_finder_2D(mom0.value, mom0.header, 10, distance=0.84e6)
fils.mask = ~(bub.mask.copy())
fils.medskel()
fils.analyze_skeletons()
# So at least one of the radial profiles fails. BUT the second fit is to a
# skeleton that is essentially the entire disk, so plot without interactivity
# and save the plot and the parameters shown in verbose mode.
p.ioff()
fils.find_widths(verbose=True, max_distance=500, auto_cut=False, try_nonparam=False)
# Fit Parameters: [ 541.31726502 129.85351117 180.0710914 304.01262168
# Fit Errors: [ 0.89151974 0.48394493 0.27313627 1.1462345 ]
|
Use filfinder to get the average radial width of features in the moment 0
|
Use filfinder to get the average radial width of features in the moment 0
|
Python
|
mit
|
e-koch/VLA_Lband,e-koch/VLA_Lband
|
Use filfinder to get the average radial width of features in the moment 0
|
from fil_finder import fil_finder_2D
from basics import BubbleFinder2D
from spectral_cube.lower_dimensional_structures import Projection
from astropy.io import fits
from radio_beam import Beam
from astropy.wcs import WCS
import astropy.units as u
import matplotlib.pyplot as p
'''
Filaments in M33? Why not?
'''
mom0_fits = fits.open("/home/eric/MyRAID/M33/14B-088/HI/full_imaging/M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.mom0.fits")[0]
mom0 = Projection(mom0_fits.data, wcs=WCS(mom0_fits.header))
mom0.meta['beam'] = Beam.from_fits_header(mom0_fits.header)
# Create the bubble mask instead of letting FilFinder do it.
bub = BubbleFinder2D(mom0, sigma=80.)
fils = fil_finder_2D(mom0.value, mom0.header, 10, distance=0.84e6)
fils.mask = ~(bub.mask.copy())
fils.medskel()
fils.analyze_skeletons()
# So at least one of the radial profiles fails. BUT the second fit is to a
# skeleton that is essentially the entire disk, so plot without interactivity
# and save the plot and the parameters shown in verbose mode.
p.ioff()
fils.find_widths(verbose=True, max_distance=500, auto_cut=False, try_nonparam=False)
# Fit Parameters: [ 541.31726502 129.85351117 180.0710914 304.01262168
# Fit Errors: [ 0.89151974 0.48394493 0.27313627 1.1462345 ]
|
<commit_before><commit_msg>Use filfinder to get the average radial width of features in the moment 0<commit_after>
|
from fil_finder import fil_finder_2D
from basics import BubbleFinder2D
from spectral_cube.lower_dimensional_structures import Projection
from astropy.io import fits
from radio_beam import Beam
from astropy.wcs import WCS
import astropy.units as u
import matplotlib.pyplot as p
'''
Filaments in M33? Why not?
'''
mom0_fits = fits.open("/home/eric/MyRAID/M33/14B-088/HI/full_imaging/M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.mom0.fits")[0]
mom0 = Projection(mom0_fits.data, wcs=WCS(mom0_fits.header))
mom0.meta['beam'] = Beam.from_fits_header(mom0_fits.header)
# Create the bubble mask instead of letting FilFinder do it.
bub = BubbleFinder2D(mom0, sigma=80.)
fils = fil_finder_2D(mom0.value, mom0.header, 10, distance=0.84e6)
fils.mask = ~(bub.mask.copy())
fils.medskel()
fils.analyze_skeletons()
# So at least one of the radial profiles fails. BUT the second fit is to a
# skeleton that is essentially the entire disk, so plot without interactivity
# and save the plot and the parameters shown in verbose mode.
p.ioff()
fils.find_widths(verbose=True, max_distance=500, auto_cut=False, try_nonparam=False)
# Fit Parameters: [ 541.31726502 129.85351117 180.0710914 304.01262168
# Fit Errors: [ 0.89151974 0.48394493 0.27313627 1.1462345 ]
|
Use filfinder to get the average radial width of features in the moment 0
from fil_finder import fil_finder_2D
from basics import BubbleFinder2D
from spectral_cube.lower_dimensional_structures import Projection
from astropy.io import fits
from radio_beam import Beam
from astropy.wcs import WCS
import astropy.units as u
import matplotlib.pyplot as p
'''
Filaments in M33? Why not?
'''
mom0_fits = fits.open("/home/eric/MyRAID/M33/14B-088/HI/full_imaging/M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.mom0.fits")[0]
mom0 = Projection(mom0_fits.data, wcs=WCS(mom0_fits.header))
mom0.meta['beam'] = Beam.from_fits_header(mom0_fits.header)
# Create the bubble mask instead of letting FilFinder do it.
bub = BubbleFinder2D(mom0, sigma=80.)
fils = fil_finder_2D(mom0.value, mom0.header, 10, distance=0.84e6)
fils.mask = ~(bub.mask.copy())
fils.medskel()
fils.analyze_skeletons()
# So at least one of the radial profiles fails. BUT the second fit is to a
# skeleton that is essentially the entire disk, so plot without interactivity
# and save the plot and the parameters shown in verbose mode.
p.ioff()
fils.find_widths(verbose=True, max_distance=500, auto_cut=False, try_nonparam=False)
# Fit Parameters: [ 541.31726502 129.85351117 180.0710914 304.01262168
# Fit Errors: [ 0.89151974 0.48394493 0.27313627 1.1462345 ]
|
<commit_before><commit_msg>Use filfinder to get the average radial width of features in the moment 0<commit_after>
from fil_finder import fil_finder_2D
from basics import BubbleFinder2D
from spectral_cube.lower_dimensional_structures import Projection
from astropy.io import fits
from radio_beam import Beam
from astropy.wcs import WCS
import astropy.units as u
import matplotlib.pyplot as p
'''
Filaments in M33? Why not?
'''
mom0_fits = fits.open("/home/eric/MyRAID/M33/14B-088/HI/full_imaging/M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.mom0.fits")[0]
mom0 = Projection(mom0_fits.data, wcs=WCS(mom0_fits.header))
mom0.meta['beam'] = Beam.from_fits_header(mom0_fits.header)
# Create the bubble mask instead of letting FilFinder do it.
bub = BubbleFinder2D(mom0, sigma=80.)
fils = fil_finder_2D(mom0.value, mom0.header, 10, distance=0.84e6)
fils.mask = ~(bub.mask.copy())
fils.medskel()
fils.analyze_skeletons()
# So at least one of the radial profiles fails. BUT the second fit is to a
# skeleton that is essentially the entire disk, so plot without interactivity
# and save the plot and the parameters shown in verbose mode.
p.ioff()
fils.find_widths(verbose=True, max_distance=500, auto_cut=False, try_nonparam=False)
# Fit Parameters: [ 541.31726502 129.85351117 180.0710914 304.01262168
# Fit Errors: [ 0.89151974 0.48394493 0.27313627 1.1462345 ]
|
|
da5fed886d519b271a120820668d21518872f52c
|
remove_duplicates_from_sorted_array.py
|
remove_duplicates_from_sorted_array.py
|
'''
Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
Do not allocate extra space for another array, you must do this in place with constant memory.
For example,
Given input array A = [1,1,2],
Your function should return length = 2, and A is now [1,2].
'''
'''
Use two pointers. Quite straightforward.
'''
class Solution:
# @param a list of integers
# @return an integer
def removeDuplicates(self, A):
if len(A) < 2:
return len(A)
p1 = 0
p2 = 1
while p2 < len(A):
while p2 < len(A) and A[p1] == A[p2]:
p2 += 1
p1 += 1
if p2 < len(A):
A[p1] = A[p2]
return p1
if __name__ == '__main__':
s = Solution()
A = [1, 1, 2, 2, 3]
print s.removeDuplicates(A)
print A
A = [1, 1]
print s.removeDuplicates(A)
print A
|
Remove Duplicates from Sorted Array problem
|
Remove Duplicates from Sorted Array problem
|
Python
|
apache-2.0
|
zsmountain/leetcode,zsmountain/leetcode,zsmountain/leetcode
|
Remove Duplicates from Sorted Array problem
|
'''
Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
Do not allocate extra space for another array, you must do this in place with constant memory.
For example,
Given input array A = [1,1,2],
Your function should return length = 2, and A is now [1,2].
'''
'''
Use two pointers. Quite straightforward.
'''
class Solution:
# @param a list of integers
# @return an integer
def removeDuplicates(self, A):
if len(A) < 2:
return len(A)
p1 = 0
p2 = 1
while p2 < len(A):
while p2 < len(A) and A[p1] == A[p2]:
p2 += 1
p1 += 1
if p2 < len(A):
A[p1] = A[p2]
return p1
if __name__ == '__main__':
s = Solution()
A = [1, 1, 2, 2, 3]
print s.removeDuplicates(A)
print A
A = [1, 1]
print s.removeDuplicates(A)
print A
|
<commit_before><commit_msg>Remove Duplicates from Sorted Array problem<commit_after>
|
'''
Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
Do not allocate extra space for another array, you must do this in place with constant memory.
For example,
Given input array A = [1,1,2],
Your function should return length = 2, and A is now [1,2].
'''
'''
Use two pointers. Quite straightforward.
'''
class Solution:
# @param a list of integers
# @return an integer
def removeDuplicates(self, A):
if len(A) < 2:
return len(A)
p1 = 0
p2 = 1
while p2 < len(A):
while p2 < len(A) and A[p1] == A[p2]:
p2 += 1
p1 += 1
if p2 < len(A):
A[p1] = A[p2]
return p1
if __name__ == '__main__':
s = Solution()
A = [1, 1, 2, 2, 3]
print s.removeDuplicates(A)
print A
A = [1, 1]
print s.removeDuplicates(A)
print A
|
Remove Duplicates from Sorted Array problem'''
Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
Do not allocate extra space for another array, you must do this in place with constant memory.
For example,
Given input array A = [1,1,2],
Your function should return length = 2, and A is now [1,2].
'''
'''
Use two pointers. Quite straightforward.
'''
class Solution:
# @param a list of integers
# @return an integer
def removeDuplicates(self, A):
if len(A) < 2:
return len(A)
p1 = 0
p2 = 1
while p2 < len(A):
while p2 < len(A) and A[p1] == A[p2]:
p2 += 1
p1 += 1
if p2 < len(A):
A[p1] = A[p2]
return p1
if __name__ == '__main__':
s = Solution()
A = [1, 1, 2, 2, 3]
print s.removeDuplicates(A)
print A
A = [1, 1]
print s.removeDuplicates(A)
print A
|
<commit_before><commit_msg>Remove Duplicates from Sorted Array problem<commit_after>'''
Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
Do not allocate extra space for another array, you must do this in place with constant memory.
For example,
Given input array A = [1,1,2],
Your function should return length = 2, and A is now [1,2].
'''
'''
Use two pointers. Quite straightforward.
'''
class Solution:
# @param a list of integers
# @return an integer
def removeDuplicates(self, A):
if len(A) < 2:
return len(A)
p1 = 0
p2 = 1
while p2 < len(A):
while p2 < len(A) and A[p1] == A[p2]:
p2 += 1
p1 += 1
if p2 < len(A):
A[p1] = A[p2]
return p1
if __name__ == '__main__':
s = Solution()
A = [1, 1, 2, 2, 3]
print s.removeDuplicates(A)
print A
A = [1, 1]
print s.removeDuplicates(A)
print A
|
|
207116ee7ba8d8da521f497997da90066831a551
|
django3_codemods/replace_unicode_with_str.py
|
django3_codemods/replace_unicode_with_str.py
|
import sys
from bowler import Query
(
Query(sys.argv[1])
.select_function("__unicode__")
.rename('__str__')
.idiff()
),
(
Query(sys.argv[1])
.select_method("__unicode__")
.is_call()
.rename('__str__')
.idiff()
)
|
Add codemod to replace __unicode__ with __str__
|
Add codemod to replace __unicode__ with __str__
|
Python
|
apache-2.0
|
edx/repo-tools,edx/repo-tools
|
Add codemod to replace __unicode__ with __str__
|
import sys
from bowler import Query
(
Query(sys.argv[1])
.select_function("__unicode__")
.rename('__str__')
.idiff()
),
(
Query(sys.argv[1])
.select_method("__unicode__")
.is_call()
.rename('__str__')
.idiff()
)
|
<commit_before><commit_msg>Add codemod to replace __unicode__ with __str__<commit_after>
|
import sys
from bowler import Query
(
Query(sys.argv[1])
.select_function("__unicode__")
.rename('__str__')
.idiff()
),
(
Query(sys.argv[1])
.select_method("__unicode__")
.is_call()
.rename('__str__')
.idiff()
)
|
Add codemod to replace __unicode__ with __str__import sys
from bowler import Query
(
Query(sys.argv[1])
.select_function("__unicode__")
.rename('__str__')
.idiff()
),
(
Query(sys.argv[1])
.select_method("__unicode__")
.is_call()
.rename('__str__')
.idiff()
)
|
<commit_before><commit_msg>Add codemod to replace __unicode__ with __str__<commit_after>import sys
from bowler import Query
(
Query(sys.argv[1])
.select_function("__unicode__")
.rename('__str__')
.idiff()
),
(
Query(sys.argv[1])
.select_method("__unicode__")
.is_call()
.rename('__str__')
.idiff()
)
|
|
2d1624f088431e5f71214988499f732695a82b16
|
lbrynet/__init__.py
|
lbrynet/__init__.py
|
import logging
__version__ = "0.15.0rc3"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
import logging
__version__ = "0.15.0rc4"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
Bump version 0.15.0rc3 --> 0.15.0rc4
|
Bump version 0.15.0rc3 --> 0.15.0rc4
Signed-off-by: Jack Robison <40884020c67726395ea162083a125620dc32cdab@lbry.io>
|
Python
|
mit
|
lbryio/lbry,lbryio/lbry,zestyr/lbry,lbryio/lbry,zestyr/lbry,zestyr/lbry
|
import logging
__version__ = "0.15.0rc3"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
Bump version 0.15.0rc3 --> 0.15.0rc4
Signed-off-by: Jack Robison <40884020c67726395ea162083a125620dc32cdab@lbry.io>
|
import logging
__version__ = "0.15.0rc4"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
<commit_before>import logging
__version__ = "0.15.0rc3"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
<commit_msg>Bump version 0.15.0rc3 --> 0.15.0rc4
Signed-off-by: Jack Robison <40884020c67726395ea162083a125620dc32cdab@lbry.io><commit_after>
|
import logging
__version__ = "0.15.0rc4"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
import logging
__version__ = "0.15.0rc3"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
Bump version 0.15.0rc3 --> 0.15.0rc4
Signed-off-by: Jack Robison <40884020c67726395ea162083a125620dc32cdab@lbry.io>import logging
__version__ = "0.15.0rc4"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
<commit_before>import logging
__version__ = "0.15.0rc3"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
<commit_msg>Bump version 0.15.0rc3 --> 0.15.0rc4
Signed-off-by: Jack Robison <40884020c67726395ea162083a125620dc32cdab@lbry.io><commit_after>import logging
__version__ = "0.15.0rc4"
version = tuple(__version__.split('.'))
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
4764b5248cf91042a12ce6aef77a04c37360eb4f
|
pyglab/pyglab.py
|
pyglab/pyglab.py
|
import requests
class Pyglab(object):
def __init__(self, token):
self.token = token
self.headers = {'PRIVATE-TOKEN', token}
self.user = None
def sudo(self, user):
"""Permanently set a different username. Returns the old username."""
previous_user = self.user
self.user = user
return previous_user
|
Add initial shell of Pyglab class.
|
Add initial shell of Pyglab class.
|
Python
|
mit
|
sloede/pyglab,sloede/pyglab
|
Add initial shell of Pyglab class.
|
import requests
class Pyglab(object):
def __init__(self, token):
self.token = token
self.headers = {'PRIVATE-TOKEN', token}
self.user = None
def sudo(self, user):
"""Permanently set a different username. Returns the old username."""
previous_user = self.user
self.user = user
return previous_user
|
<commit_before><commit_msg>Add initial shell of Pyglab class.<commit_after>
|
import requests
class Pyglab(object):
def __init__(self, token):
self.token = token
self.headers = {'PRIVATE-TOKEN', token}
self.user = None
def sudo(self, user):
"""Permanently set a different username. Returns the old username."""
previous_user = self.user
self.user = user
return previous_user
|
Add initial shell of Pyglab class.import requests
class Pyglab(object):
def __init__(self, token):
self.token = token
self.headers = {'PRIVATE-TOKEN', token}
self.user = None
def sudo(self, user):
"""Permanently set a different username. Returns the old username."""
previous_user = self.user
self.user = user
return previous_user
|
<commit_before><commit_msg>Add initial shell of Pyglab class.<commit_after>import requests
class Pyglab(object):
def __init__(self, token):
self.token = token
self.headers = {'PRIVATE-TOKEN': token}
self.user = None
def sudo(self, user):
"""Permanently set a different username. Returns the old username."""
previous_user = self.user
self.user = user
return previous_user
|
|
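A minimal usage sketch for the shell above; the import path, token string, and username are assumptions, since the commit only adds the class itself.

# Usage sketch only; import path and token value are invented for illustration.
from pyglab.pyglab import Pyglab

gl = Pyglab('my-private-token')
previous = gl.sudo('alice')   # switch the acting user, returning the old one
print(previous, gl.user)      # None alice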
70e04b20c5d78b41546aa4ea1a1e2fd82af7527f
|
comrade/http/__init__.py
|
comrade/http/__init__.py
|
from django.core.serializers import json, serialize
from django.db.models.query import QuerySet
from django.http import HttpResponse
from django.utils import simplejson
class HttpJsonResponse(HttpResponse):
def __init__(self, object, status=None):
if isinstance(object, QuerySet):
content = serialize('json', object)
else:
content = simplejson.dumps(object, cls=json.DjangoJSONEncoder,
ensure_ascii=False)
super(HttpJsonResponse, self).__init__(
content, content_type='application/json')
|
Add JSON HttpResponse that does the encoding for you.
|
Add JSON HttpResponse that does the encoding for you.
|
Python
|
mit
|
bueda/django-comrade
|
Add JSON HttpResponse that does the encoding for you.
|
from django.core.serializers import json, serialize
from django.db.models.query import QuerySet
from django.http import HttpResponse
from django.utils import simplejson
class HttpJsonResponse(HttpResponse):
def __init__(self, object, status=None):
if isinstance(object, QuerySet):
content = serialize('json', object)
else:
content = simplejson.dumps(object, cls=json.DjangoJSONEncoder,
ensure_ascii=False)
super(HttpJsonResponse, self).__init__(
content, content_type='application/json')
|
<commit_before><commit_msg>Add JSON HttpResponse that does the encoding for you.<commit_after>
|
from django.core.serializers import json, serialize
from django.db.models.query import QuerySet
from django.http import HttpResponse
from django.utils import simplejson
class HttpJsonResponse(HttpResponse):
def __init__(self, object, status=None):
if isinstance(object, QuerySet):
content = serialize('json', object)
else:
content = simplejson.dumps(object, cls=json.DjangoJSONEncoder,
ensure_ascii=False)
super(HttpJsonResponse, self).__init__(
content, content_type='application/json')
|
Add JSON HttpResponse that does the encoding for you.from django.core.serializers import json, serialize
from django.db.models.query import QuerySet
from django.http import HttpResponse
from django.utils import simplejson
class HttpJsonResponse(HttpResponse):
def __init__(self, object, status=None):
if isinstance(object, QuerySet):
content = serialize('json', object)
else:
content = simplejson.dumps(object, cls=json.DjangoJSONEncoder,
ensure_ascii=False)
super(HttpJsonResponse, self).__init__(
content, content_type='application/json')
|
<commit_before><commit_msg>Add JSON HttpResponse that does the encoding for you.<commit_after>from django.core.serializers import json, serialize
from django.db.models.query import QuerySet
from django.http import HttpResponse
from django.utils import simplejson
class HttpJsonResponse(HttpResponse):
def __init__(self, object, status=None):
if isinstance(object, QuerySet):
content = serialize('json', object)
else:
content = simplejson.dumps(object, cls=json.DjangoJSONEncoder,
ensure_ascii=False)
super(HttpJsonResponse, self).__init__(
content, content_type='application/json')
|
|
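A sketch of how the response class above might be used; the view function and payload are invented for illustration, and only the import path follows from the file location in the commit.

# Hypothetical view using the HttpJsonResponse above.
from comrade.http import HttpJsonResponse

def status_view(request):
    # A plain dict takes the simplejson path; a QuerySet would instead be run
    # through Django's 'json' serializer, per the class above.
    return HttpJsonResponse({'status': 'ok', 'count': 3})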
6ad081e91e337e1627b70674109f45ba35248f8c
|
zou/migrations/versions/e839d6603c09_add_person_id_to_shot_history.py
|
zou/migrations/versions/e839d6603c09_add_person_id_to_shot_history.py
|
"""add person id to shot history
Revision ID: e839d6603c09
Revises: 346250b5304c
Create Date: 2020-12-14 12:00:19.045783
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
import sqlalchemy_utils
import uuid
# revision identifiers, used by Alembic.
revision = 'e839d6603c09'
down_revision = '346250b5304c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('entity_version', sa.Column('person_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), default=uuid.uuid4, nullable=True))
op.create_index(op.f('ix_entity_version_person_id'), 'entity_version', ['person_id'], unique=False)
op.create_foreign_key(None, 'entity_version', 'person', ['person_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'entity_version', type_='foreignkey')
op.drop_index(op.f('ix_entity_version_person_id'), table_name='entity_version')
op.drop_column('entity_version', 'person_id')
# ### end Alembic commands ###
|
Add missing migration file to the repo
|
[qa] Add missing migration file to the repo
|
Python
|
agpl-3.0
|
cgwire/zou
|
[qa] Add missing migration file to the repo
|
"""add person id to shot history
Revision ID: e839d6603c09
Revises: 346250b5304c
Create Date: 2020-12-14 12:00:19.045783
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
import sqlalchemy_utils
import uuid
# revision identifiers, used by Alembic.
revision = 'e839d6603c09'
down_revision = '346250b5304c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('entity_version', sa.Column('person_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), default=uuid.uuid4, nullable=True))
op.create_index(op.f('ix_entity_version_person_id'), 'entity_version', ['person_id'], unique=False)
op.create_foreign_key(None, 'entity_version', 'person', ['person_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'entity_version', type_='foreignkey')
op.drop_index(op.f('ix_entity_version_person_id'), table_name='entity_version')
op.drop_column('entity_version', 'person_id')
# ### end Alembic commands ###
|
<commit_before><commit_msg>[qa] Add missing migration file to the repo<commit_after>
|
"""add person id to shot history
Revision ID: e839d6603c09
Revises: 346250b5304c
Create Date: 2020-12-14 12:00:19.045783
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
import sqlalchemy_utils
import uuid
# revision identifiers, used by Alembic.
revision = 'e839d6603c09'
down_revision = '346250b5304c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('entity_version', sa.Column('person_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), default=uuid.uuid4, nullable=True))
op.create_index(op.f('ix_entity_version_person_id'), 'entity_version', ['person_id'], unique=False)
op.create_foreign_key(None, 'entity_version', 'person', ['person_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'entity_version', type_='foreignkey')
op.drop_index(op.f('ix_entity_version_person_id'), table_name='entity_version')
op.drop_column('entity_version', 'person_id')
# ### end Alembic commands ###
|
[qa] Add missing migration file to the repo"""add person id to shot history
Revision ID: e839d6603c09
Revises: 346250b5304c
Create Date: 2020-12-14 12:00:19.045783
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
import sqlalchemy_utils
import uuid
# revision identifiers, used by Alembic.
revision = 'e839d6603c09'
down_revision = '346250b5304c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('entity_version', sa.Column('person_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), default=uuid.uuid4, nullable=True))
op.create_index(op.f('ix_entity_version_person_id'), 'entity_version', ['person_id'], unique=False)
op.create_foreign_key(None, 'entity_version', 'person', ['person_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'entity_version', type_='foreignkey')
op.drop_index(op.f('ix_entity_version_person_id'), table_name='entity_version')
op.drop_column('entity_version', 'person_id')
# ### end Alembic commands ###
|
<commit_before><commit_msg>[qa] Add missing migration file to the repo<commit_after>"""add person id to shot history
Revision ID: e839d6603c09
Revises: 346250b5304c
Create Date: 2020-12-14 12:00:19.045783
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
import sqlalchemy_utils
import uuid
# revision identifiers, used by Alembic.
revision = 'e839d6603c09'
down_revision = '346250b5304c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('entity_version', sa.Column('person_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), default=uuid.uuid4, nullable=True))
op.create_index(op.f('ix_entity_version_person_id'), 'entity_version', ['person_id'], unique=False)
op.create_foreign_key(None, 'entity_version', 'person', ['person_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'entity_version', type_='foreignkey')
op.drop_index(op.f('ix_entity_version_person_id'), table_name='entity_version')
op.drop_column('entity_version', 'person_id')
# ### end Alembic commands ###
|
|
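For readers cross-checking the migration above against the ORM, this is roughly the model-side column it implies. It is a sketch under the assumption that zou's entity-version model is a SQLAlchemy declarative class; that model is not shown in this commit.

# Sketch of the implied model field (assumption: declarative SQLAlchemy model).
import uuid
import sqlalchemy as sa
import sqlalchemy_utils

person_id = sa.Column(
    sqlalchemy_utils.types.uuid.UUIDType(binary=False),
    sa.ForeignKey('person.id'),
    default=uuid.uuid4,
    nullable=True,
    index=True,
)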
c25cebf31648466111cb3d576e0a398bb4220ccf
|
sabnzbd/test_cleanupfilename.py
|
sabnzbd/test_cleanupfilename.py
|
import unittest
from cleanupfilename import rename
class TestRename(unittest.TestCase):
files = []
dirs = []
def setUp(self):
self.files = [('filename-sample.x264.mp4', 'filename.mp4'),
('filename.mp4', 'filename.mp4')]
self.dirs = [('filename sample mp4', 'filename'),
('filename 540p', 'filename'),
('filename [web-DL] part001.', 'filename'),
('actual.file.name-1080p.BluRay.x264-GECKOS[rarbg]', 'actual file name'),
]
def test_list(self):
for file, output in self.files:
self.assertEqual(rename(file, False), output)
for file, output in self.dirs:
self.assertEqual(rename(file, True), output)
if __name__ == '__main__':
unittest.main()
|
Add test for sabnzbd cleanupfilename.py
|
Add test for sabnzbd cleanupfilename.py
|
Python
|
bsd-3-clause
|
FreekKalter/linux-scripts,FreekKalter/linux-scripts,FreekKalter/linux-scripts,FreekKalter/linux-scripts
|
Add test for sabnzbd cleanupfilename.py
|
import unittest
from cleanupfilename import rename
class TestRename(unittest.TestCase):
files = []
dirs = []
def setUp(self):
self.files = [('filename-sample.x264.mp4', 'filename.mp4'),
('filename.mp4', 'filename.mp4')]
self.dirs = [('filename sample mp4', 'filename'),
('filename 540p', 'filename'),
('filename [web-DL] part001.', 'filename'),
('actual.file.name-1080p.BluRay.x264-GECKOS[rarbg]', 'actual file name'),
]
def test_list(self):
for file, output in self.files:
self.assertEqual(rename(file, False), output)
for file, output in self.dirs:
self.assertEqual(rename(file, True), output)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for sabnzbd cleanupfilename.py<commit_after>
|
import unittest
from cleanupfilename import rename
class TestRename(unittest.TestCase):
files = []
dirs = []
def setUp(self):
self.files = [('filename-sample.x264.mp4', 'filename.mp4'),
('filename.mp4', 'filename.mp4')]
self.dirs = [('filename sample mp4', 'filename'),
('filename 540p', 'filename'),
('filename [web-DL] part001.', 'filename'),
('actual.file.name-1080p.BluRay.x264-GECKOS[rarbg]', 'actual file name'),
]
def test_list(self):
for file, output in self.files:
self.assertEqual(rename(file, False), output)
for file, output in self.dirs:
self.assertEqual(rename(file, True), output)
if __name__ == '__main__':
unittest.main()
|
Add test for sabnzbd cleanupfilename.pyimport unittest
from cleanupfilename import rename
class TestRename(unittest.TestCase):
files = []
dirs = []
def setUp(self):
self.files = [('filename-sample.x264.mp4', 'filename.mp4'),
('filename.mp4', 'filename.mp4')]
self.dirs = [('filename sample mp4', 'filename'),
('filename 540p', 'filename'),
('filename [web-DL] part001.', 'filename'),
('actual.file.name-1080p.BluRay.x264-GECKOS[rarbg]', 'actual file name'),
]
def test_list(self):
for file, output in self.files:
self.assertEqual(rename(file, False), output)
for file, output in self.dirs:
self.assertEqual(rename(file, True), output)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for sabnzbd cleanupfilename.py<commit_after>import unittest
from cleanupfilename import rename
class TestRename(unittest.TestCase):
files = []
dirs = []
def setUp(self):
self.files = [('filename-sample.x264.mp4', 'filename.mp4'),
('filename.mp4', 'filename.mp4')]
self.dirs = [('filename sample mp4', 'filename'),
('filename 540p', 'filename'),
('filename [web-DL] part001.', 'filename'),
('actual.file.name-1080p.BluRay.x264-GECKOS[rarbg]', 'actual file name'),
]
def test_list(self):
for file, output in self.files:
self.assertEqual(rename(file, False), output)
for file, output in self.dirs:
self.assertEqual(rename(file, True), output)
if __name__ == '__main__':
unittest.main()
|
|
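The rename() under test lives in cleanupfilename.py, which is not part of this commit. As a purely hypothetical reference, one cleaner that would satisfy the fixture cases listed above could look like the sketch below; it is not the actual sabnzbd implementation.

# Hypothetical rename(); the real cleanupfilename.py is not shown in this commit.
import re

_JUNK = re.compile(
    r'[-. \[]*(sample|x264|540p|1080p|bluray|web-dl|part\d+|rarbg|geckos|mp4)[\]. ]*',
    re.IGNORECASE)

def rename(name, is_dir):
    if is_dir:
        cleaned = _JUNK.sub(' ', name)               # drop release junk tokens
        cleaned = re.sub(r'[.\[\]]', ' ', cleaned)   # dots and brackets become spaces
        return re.sub(r'\s+', ' ', cleaned).strip()
    base, ext = name.rsplit('.', 1) if '.' in name else (name, '')
    cleaned = _JUNK.sub('', base).rstrip('-. ')
    return cleaned + ('.' + ext if ext else '')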
31bb487a2f75268cb0b60ef4539935df83b68a84
|
quiz/3-radixsort.py
|
quiz/3-radixsort.py
|
#!/usr/bin/env python3
def make_arr(text):
return text.strip().split(' ')
def print_arr(arr):
for t in arr:
print(t, end=' ')
print()
def solve_q1(arr, time):
for t in range(len(arr[0]) - 1, time - 1, -1):
arr = sorted(arr, key=lambda x: x[t])
return arr
def msd_radix_sort(arr, start, end, depth):
if end - start <= 1:
return
global msd_radix_sort_left
if msd_radix_sort_left <= 0:
return
msd_radix_sort_left -= 1
arr[start:end] = sorted(arr[start:end], key=lambda x: x[depth])
pre_n = start
pre_v = arr[pre_n][depth]
for i in range(start, end):
if arr[i][depth] != pre_v:
pre_v = arr[i][depth]
msd_radix_sort(arr, pre_n, i, depth + 1)
pre_n = i
msd_radix_sort(arr, pre_n, end, depth + 1)
def solve_q2(arr, time):
global msd_radix_sort_left
msd_radix_sort_left = time
msd_radix_sort(arr, 0, len(arr), 0)
return arr
def solve_q3(arr):
k = arr[0][0]
l = 0
m = l
h = len(arr) - 1
while m <= h:
v = arr[m][0]
if v < k:
arr[m], arr[l] = arr[l], arr[m]
m += 1
l += 1
elif v == k:
m += 1
else: # arr[m] > k
arr[m], arr[h] = arr[h], arr[m]
h -= 1
return arr
q1 = ' 4322 4441 1244 3122 1332 2131 4431 3113 2244 1241'
q2 = ' 1324 3314 1122 3112 4423 3321 3344 4223 1412 1344 4314 4412 1333 2323 3243 '
q3 = ' 5552 5255 3462 2614 6432 5252 6543 6152 5156 5434 '
print_arr(solve_q1(make_arr(q1), 2))
print_arr(solve_q2(make_arr(q2), 3))
print_arr(solve_q3(make_arr(q3)))
|
Add auto solver for "W3-Radix Sorts".
|
Add auto solver for "W3-Radix Sorts".
|
Python
|
mit
|
hghwng/mooc-algs2,hghwng/mooc-algs2
|
Add auto solver for "W3-Radix Sorts".
|
#!/usr/bin/env python3
def make_arr(text):
return text.strip().split(' ')
def print_arr(arr):
for t in arr:
print(t, end=' ')
print()
def solve_q1(arr, time):
for t in range(len(arr[0]) - 1, time - 1, -1):
arr = sorted(arr, key=lambda x: x[t])
return arr
def msd_radix_sort(arr, start, end, depth):
if end - start <= 1:
return
global msd_radix_sort_left
if msd_radix_sort_left <= 0:
return
msd_radix_sort_left -= 1
arr[start:end] = sorted(arr[start:end], key=lambda x: x[depth])
pre_n = start
pre_v = arr[pre_n][depth]
for i in range(start, end):
if arr[i][depth] != pre_v:
pre_v = arr[i][depth]
msd_radix_sort(arr, pre_n, i, depth + 1)
pre_n = i
msd_radix_sort(arr, pre_n, end, depth + 1)
def solve_q2(arr, time):
global msd_radix_sort_left
msd_radix_sort_left = time
msd_radix_sort(arr, 0, len(arr), 0)
return arr
def solve_q3(arr):
k = arr[0][0]
l = 0
m = l
h = len(arr) - 1
while m <= h:
v = arr[m][0]
if v < k:
arr[m], arr[l] = arr[l], arr[m]
m += 1
l += 1
elif v == k:
m += 1
else: # arr[m] > k
arr[m], arr[h] = arr[h], arr[m]
h -= 1
return arr
q1 = ' 4322 4441 1244 3122 1332 2131 4431 3113 2244 1241'
q2 = ' 1324 3314 1122 3112 4423 3321 3344 4223 1412 1344 4314 4412 1333 2323 3243 '
q3 = ' 5552 5255 3462 2614 6432 5252 6543 6152 5156 5434 '
print_arr(solve_q1(make_arr(q1), 2))
print_arr(solve_q2(make_arr(q2), 3))
print_arr(solve_q3(make_arr(q3)))
|
<commit_before><commit_msg>Add auto solver for "W3-Radix Sorts".<commit_after>
|
#!/usr/bin/env python3
def make_arr(text):
return text.strip().split(' ')
def print_arr(arr):
for t in arr:
print(t, end=' ')
print()
def solve_q1(arr, time):
for t in range(len(arr[0]) - 1, time - 1, -1):
arr = sorted(arr, key=lambda x: x[t])
return arr
def msd_radix_sort(arr, start, end, depth):
if end - start <= 1:
return
global msd_radix_sort_left
if msd_radix_sort_left <= 0:
return
msd_radix_sort_left -= 1
arr[start:end] = sorted(arr[start:end], key=lambda x: x[depth])
pre_n = start
pre_v = arr[pre_n][depth]
for i in range(start, end):
if arr[i][depth] != pre_v:
pre_v = arr[i][depth]
msd_radix_sort(arr, pre_n, i, depth + 1)
pre_n = i
msd_radix_sort(arr, pre_n, end, depth + 1)
def solve_q2(arr, time):
global msd_radix_sort_left
msd_radix_sort_left = time
msd_radix_sort(arr, 0, len(arr), 0)
return arr
def solve_q3(arr):
k = arr[0][0]
l = 0
m = l
h = len(arr) - 1
while m <= h:
v = arr[m][0]
if v < k:
arr[m], arr[l] = arr[l], arr[m]
m += 1
l += 1
elif v == k:
m += 1
else: # arr[m] > k
arr[m], arr[h] = arr[h], arr[m]
h -= 1
return arr
q1 = ' 4322 4441 1244 3122 1332 2131 4431 3113 2244 1241'
q2 = ' 1324 3314 1122 3112 4423 3321 3344 4223 1412 1344 4314 4412 1333 2323 3243 '
q3 = ' 5552 5255 3462 2614 6432 5252 6543 6152 5156 5434 '
print_arr(solve_q1(make_arr(q1), 2))
print_arr(solve_q2(make_arr(q2), 3))
print_arr(solve_q3(make_arr(q3)))
|
Add auto solver for "W3-Radix Sorts".#!/usr/bin/env python3
def make_arr(text):
return text.strip().split(' ')
def print_arr(arr):
for t in arr:
print(t, end=' ')
print()
def solve_q1(arr, time):
for t in range(len(arr[0]) - 1, time - 1, -1):
arr = sorted(arr, key=lambda x: x[t])
return arr
def msd_radix_sort(arr, start, end, depth):
if end - start <= 1:
return
global msd_radix_sort_left
if msd_radix_sort_left <= 0:
return
msd_radix_sort_left -= 1
arr[start:end] = sorted(arr[start:end], key=lambda x: x[depth])
pre_n = start
pre_v = arr[pre_n][depth]
for i in range(start, end):
if arr[i][depth] != pre_v:
pre_v = arr[i][depth]
msd_radix_sort(arr, pre_n, i, depth + 1)
pre_n = i
msd_radix_sort(arr, pre_n, end, depth + 1)
def solve_q2(arr, time):
global msd_radix_sort_left
msd_radix_sort_left = time
msd_radix_sort(arr, 0, len(arr), 0)
return arr
def solve_q3(arr):
k = arr[0][0]
l = 0
m = l
h = len(arr) - 1
while m <= h:
v = arr[m][0]
if v < k:
arr[m], arr[l] = arr[l], arr[m]
m += 1
l += 1
elif v == k:
m += 1
else: # arr[m] > k
arr[m], arr[h] = arr[h], arr[m]
h -= 1
return arr
q1 = ' 4322 4441 1244 3122 1332 2131 4431 3113 2244 1241'
q2 = ' 1324 3314 1122 3112 4423 3321 3344 4223 1412 1344 4314 4412 1333 2323 3243 '
q3 = ' 5552 5255 3462 2614 6432 5252 6543 6152 5156 5434 '
print_arr(solve_q1(make_arr(q1), 2))
print_arr(solve_q2(make_arr(q2), 3))
print_arr(solve_q3(make_arr(q3)))
|
<commit_before><commit_msg>Add auto solver for "W3-Radix Sorts".<commit_after>#!/usr/bin/env python3
def make_arr(text):
return text.strip().split(' ')
def print_arr(arr):
for t in arr:
print(t, end=' ')
print()
def solve_q1(arr, time):
for t in range(len(arr[0]) - 1, time - 1, -1):
arr = sorted(arr, key=lambda x: x[t])
return arr
def msd_radix_sort(arr, start, end, depth):
if end - start <= 1:
return
global msd_radix_sort_left
if msd_radix_sort_left <= 0:
return
msd_radix_sort_left -= 1
arr[start:end] = sorted(arr[start:end], key=lambda x: x[depth])
pre_n = start
pre_v = arr[pre_n][depth]
for i in range(start, end):
if arr[i][depth] != pre_v:
pre_v = arr[i][depth]
msd_radix_sort(arr, pre_n, i, depth + 1)
pre_n = i
msd_radix_sort(arr, pre_n, end, depth + 1)
def solve_q2(arr, time):
global msd_radix_sort_left
msd_radix_sort_left = time
msd_radix_sort(arr, 0, len(arr), 0)
return arr
def solve_q3(arr):
k = arr[0][0]
l = 0
m = l
h = len(arr) - 1
while m <= h:
v = arr[m][0]
if v < k:
arr[m], arr[l] = arr[l], arr[m]
m += 1
l += 1
elif v == k:
m += 1
else: # arr[m] > k
arr[m], arr[h] = arr[h], arr[m]
h -= 1
return arr
q1 = ' 4322 4441 1244 3122 1332 2131 4431 3113 2244 1241'
q2 = ' 1324 3314 1122 3112 4423 3321 3344 4223 1412 1344 4314 4412 1333 2323 3243 '
q3 = ' 5552 5255 3462 2614 6432 5252 6543 6152 5156 5434 '
print_arr(solve_q1(make_arr(q1), 2))
print_arr(solve_q2(make_arr(q2), 3))
print_arr(solve_q3(make_arr(q3)))
|
|
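A quick sanity check that could be appended to the script above, using the script's own helpers on a two-character input small enough to verify by hand; it relies on Python's sort being stable.

# One LSD pass (time=1) orders two-character keys by their last digit only,
# keeping the original order of ties.
check = solve_q1(make_arr(' 42 31 22 '), 1)
print_arr(check)   # 31 42 22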
896270bcd99b26e4128fd35dd3821a59807ae850
|
doc/model/model_decla.py
|
doc/model/model_decla.py
|
#autogenerated by sqlautocode
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation
engine = create_engine('mysql://monty:passwd@localhost/test_dia')
DeclarativeBase = declarative_base()
metadata = DeclarativeBase.metadata
metadata.bind = engine
class Metering(DeclarativeBase):
__tablename__ = 'Metering'
__table_args__ = {}
#column definitions
date = Column(u'date', DATE())
id = Column(u'id', INTEGER(), primary_key=True, nullable=False)
sensor_id = Column(u'sensor_id', INTEGER(), ForeignKey('Sensor.id'))
value = Column(u'value', FLOAT())
#relation definitions
Sensor = relation('Sensor', primaryjoin='Metering.sensor_id==Sensor.id')
class Sensor(DeclarativeBase):
__tablename__ = 'Sensor'
__table_args__ = {}
#column definitions
bus_adress = Column(u'bus_adress', VARCHAR(length=255))
description = Column(u'description', VARCHAR(length=255))
high_threshold = Column(u'high_threshold', FLOAT())
id = Column(u'id', INTEGER(), primary_key=True, nullable=False)
low_threshold = Column(u'low_threshold', FLOAT())
max_value = Column(u'max_value', FLOAT())
min_value = Column(u'min_value', FLOAT())
name = Column(u'name', VARCHAR(length=255))
unique_key = Column(u'unique_key', VARCHAR(length=255))
unit = Column(u'unit', VARCHAR(length=255))
unit_label = Column(u'unit_label', VARCHAR(length=255))
#relation definitions
|
Add the model.py file declarative generated from mysql.
|
Add the model.py file declarative generated from mysql.
|
Python
|
mit
|
mteule/StationMeteo,mteule/StationMeteo
|
Add the model.py file declarative generated from mysql.
|
#autogenerated by sqlautocode
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation
engine = create_engine('mysql://monty:passwd@localhost/test_dia')
DeclarativeBase = declarative_base()
metadata = DeclarativeBase.metadata
metadata.bind = engine
class Metering(DeclarativeBase):
__tablename__ = 'Metering'
__table_args__ = {}
#column definitions
date = Column(u'date', DATE())
id = Column(u'id', INTEGER(), primary_key=True, nullable=False)
sensor_id = Column(u'sensor_id', INTEGER(), ForeignKey('Sensor.id'))
value = Column(u'value', FLOAT())
#relation definitions
Sensor = relation('Sensor', primaryjoin='Metering.sensor_id==Sensor.id')
class Sensor(DeclarativeBase):
__tablename__ = 'Sensor'
__table_args__ = {}
#column definitions
bus_adress = Column(u'bus_adress', VARCHAR(length=255))
description = Column(u'description', VARCHAR(length=255))
high_threshold = Column(u'high_threshold', FLOAT())
id = Column(u'id', INTEGER(), primary_key=True, nullable=False)
low_threshold = Column(u'low_threshold', FLOAT())
max_value = Column(u'max_value', FLOAT())
min_value = Column(u'min_value', FLOAT())
name = Column(u'name', VARCHAR(length=255))
unique_key = Column(u'unique_key', VARCHAR(length=255))
unit = Column(u'unit', VARCHAR(length=255))
unit_label = Column(u'unit_label', VARCHAR(length=255))
#relation definitions
|
<commit_before><commit_msg>Add the model.py file declarative generated from mysql.<commit_after>
|
#autogenerated by sqlautocode
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation
engine = create_engine('mysql://monty:passwd@localhost/test_dia')
DeclarativeBase = declarative_base()
metadata = DeclarativeBase.metadata
metadata.bind = engine
class Metering(DeclarativeBase):
__tablename__ = 'Metering'
__table_args__ = {}
#column definitions
date = Column(u'date', DATE())
id = Column(u'id', INTEGER(), primary_key=True, nullable=False)
sensor_id = Column(u'sensor_id', INTEGER(), ForeignKey('Sensor.id'))
value = Column(u'value', FLOAT())
#relation definitions
Sensor = relation('Sensor', primaryjoin='Metering.sensor_id==Sensor.id')
class Sensor(DeclarativeBase):
__tablename__ = 'Sensor'
__table_args__ = {}
#column definitions
bus_adress = Column(u'bus_adress', VARCHAR(length=255))
description = Column(u'description', VARCHAR(length=255))
high_threshold = Column(u'high_threshold', FLOAT())
id = Column(u'id', INTEGER(), primary_key=True, nullable=False)
low_threshold = Column(u'low_threshold', FLOAT())
max_value = Column(u'max_value', FLOAT())
min_value = Column(u'min_value', FLOAT())
name = Column(u'name', VARCHAR(length=255))
unique_key = Column(u'unique_key', VARCHAR(length=255))
unit = Column(u'unit', VARCHAR(length=255))
unit_label = Column(u'unit_label', VARCHAR(length=255))
#relation definitions
|
Add the model.py file declarative generated from mysql.#autogenerated by sqlautocode
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation
engine = create_engine('mysql://monty:passwd@localhost/test_dia')
DeclarativeBase = declarative_base()
metadata = DeclarativeBase.metadata
metadata.bind = engine
class Metering(DeclarativeBase):
__tablename__ = 'Metering'
__table_args__ = {}
#column definitions
date = Column(u'date', DATE())
id = Column(u'id', INTEGER(), primary_key=True, nullable=False)
sensor_id = Column(u'sensor_id', INTEGER(), ForeignKey('Sensor.id'))
value = Column(u'value', FLOAT())
#relation definitions
Sensor = relation('Sensor', primaryjoin='Metering.sensor_id==Sensor.id')
class Sensor(DeclarativeBase):
__tablename__ = 'Sensor'
__table_args__ = {}
#column definitions
bus_adress = Column(u'bus_adress', VARCHAR(length=255))
description = Column(u'description', VARCHAR(length=255))
high_threshold = Column(u'high_threshold', FLOAT())
id = Column(u'id', INTEGER(), primary_key=True, nullable=False)
low_threshold = Column(u'low_threshold', FLOAT())
max_value = Column(u'max_value', FLOAT())
min_value = Column(u'min_value', FLOAT())
name = Column(u'name', VARCHAR(length=255))
unique_key = Column(u'unique_key', VARCHAR(length=255))
unit = Column(u'unit', VARCHAR(length=255))
unit_label = Column(u'unit_label', VARCHAR(length=255))
#relation definitions
|
<commit_before><commit_msg>Add the model.py file declarative generated from mysql.<commit_after>#autogenerated by sqlautocode
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relation
engine = create_engine('mysql://monty:passwd@localhost/test_dia')
DeclarativeBase = declarative_base()
metadata = DeclarativeBase.metadata
metadata.bind = engine
class Metering(DeclarativeBase):
__tablename__ = 'Metering'
__table_args__ = {}
#column definitions
date = Column(u'date', DATE())
id = Column(u'id', INTEGER(), primary_key=True, nullable=False)
sensor_id = Column(u'sensor_id', INTEGER(), ForeignKey('Sensor.id'))
value = Column(u'value', FLOAT())
#relation definitions
Sensor = relation('Sensor', primaryjoin='Metering.sensor_id==Sensor.id')
class Sensor(DeclarativeBase):
__tablename__ = 'Sensor'
__table_args__ = {}
#column definitions
bus_adress = Column(u'bus_adress', VARCHAR(length=255))
description = Column(u'description', VARCHAR(length=255))
high_threshold = Column(u'high_threshold', FLOAT())
id = Column(u'id', INTEGER(), primary_key=True, nullable=False)
low_threshold = Column(u'low_threshold', FLOAT())
max_value = Column(u'max_value', FLOAT())
min_value = Column(u'min_value', FLOAT())
name = Column(u'name', VARCHAR(length=255))
unique_key = Column(u'unique_key', VARCHAR(length=255))
unit = Column(u'unit', VARCHAR(length=255))
unit_label = Column(u'unit_label', VARCHAR(length=255))
#relation definitions
|
|
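A short usage sketch for the generated classes above; it assumes the hard-coded MySQL DSN is reachable and that the tables already exist.

# Usage sketch; engine, Metering and the Sensor relation come from the module above.
from sqlalchemy.orm import sessionmaker

Session = sessionmaker(bind=engine)
session = Session()

# Ten most recent meterings, joined to their sensor through the generated relation.
for metering in session.query(Metering).order_by(Metering.date.desc()).limit(10):
    print(metering.date, metering.Sensor.name, metering.value)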
7aab44f006a6412d8f169c3f9a801f41a6ea0a95
|
migrations/versions/880_remove_invalid_draft_dos2_brief_dates_again.py
|
migrations/versions/880_remove_invalid_draft_dos2_brief_dates_again.py
|
"""Remove dates from draft dos2 briefs.
This is identical to the previous migration but will be run again to cover any draft briefs with invalid
dates that could have appeared during the previous API rollout process (after the previous migration but before
the code propagated fully to the ec2 instances).
Revision ID: 880
Revises: 870
Create Date: 2016-04-07
"""
# revision identifiers, used by Alembic.
revision = '880'
down_revision = '870'
from alembic import op
import sqlalchemy as sa
frameworks_table = sa.Table(
'frameworks',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('slug', sa.String, nullable=False, unique=True, index=True)
)
briefs_table = sa.Table(
'briefs',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('framework_id', sa.Integer, nullable=False),
sa.Column('published_at', sa.DateTime, nullable=True),
sa.Column('data', sa.JSON, nullable=True)
)
def upgrade():
"""Remove question and answer for startDate from briefs.data for draft dos2 briefs."""
conn = op.get_bind()
# SELECT id, data
# FROM briefs JOIN frameworks ON briefs.framework_id = frameworks.id
# WHERE frameworks.slug = 'digital-outcomes-and-specialists-2' AND briefs.published_at IS null;
query = briefs_table.join(
frameworks_table,
briefs_table.c.framework_id == frameworks_table.c.id
).select(
sa.and_(
frameworks_table.c.slug == 'digital-outcomes-and-specialists-2',
briefs_table.c.published_at == sa.null()
)
).with_only_columns(
(
briefs_table.c.id,
briefs_table.c.data
)
)
results = conn.execute(query).fetchall()
for brief_id, brief_data in results:
if brief_data.pop('startDate', None) is not None:
# UPDATE briefs SET data = _brief_data WHERE id = _brief_id;
query = briefs_table.update().where(briefs_table.c.id==brief_id).values(data=brief_data)
conn.execute(query)
def downgrade():
pass
|
Remove start dates for the second time from draft dos2 briefs
|
Remove start dates for the second time from draft dos2 briefs
This is identical to the previous migration but will be run again
to cover any draft briefs with invalid dates that could have
appeared during the previous API rollout process (after the previous
migration but before the code propagated fully to the ec2 instances).
https://trello.com/c/BA5KSAvm/40-delete-start-dates-on-draft-requirements
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Remove start dates for the second time from draft dos2 briefs
This is identical to the previous migration but will be run again
to cover any draft briefs with invalid dates that could have
appeared during the previous API rollout process (after the previous
migration but before the code propagated fully to the ec2 instances).
https://trello.com/c/BA5KSAvm/40-delete-start-dates-on-draft-requirements
|
"""Remove dates from draft dos2 briefs.
This is identical to the previous migration but will be run again to cover any draft briefs with invalid
dates that could have appeared during the previous API rollout process (after the previous migration but before
the code propagated fully to the ec2 instances).
Revision ID: 880
Revises: 870
Create Date: 2016-04-07
"""
# revision identifiers, used by Alembic.
revision = '880'
down_revision = '870'
from alembic import op
import sqlalchemy as sa
frameworks_table = sa.Table(
'frameworks',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('slug', sa.String, nullable=False, unique=True, index=True)
)
briefs_table = sa.Table(
'briefs',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('framework_id', sa.Integer, nullable=False),
sa.Column('published_at', sa.DateTime, nullable=True),
sa.Column('data', sa.JSON, nullable=True)
)
def upgrade():
"""Remove question and answer for startDate from briefs.data for draft dos2 briefs."""
conn = op.get_bind()
# SELECT id, data
# FROM briefs JOIN frameworks ON briefs.framework_id = frameworks.id
# WHERE frameworks.slug = 'digital-outcomes-and-specialists-2' AND briefs.published_at IS null;
query = briefs_table.join(
frameworks_table,
briefs_table.c.framework_id == frameworks_table.c.id
).select(
sa.and_(
frameworks_table.c.slug == 'digital-outcomes-and-specialists-2',
briefs_table.c.published_at == sa.null()
)
).with_only_columns(
(
briefs_table.c.id,
briefs_table.c.data
)
)
results = conn.execute(query).fetchall()
for brief_id, brief_data in results:
if brief_data.pop('startDate', None) is not None:
# UPDATE briefs SET data = _brief_data WHERE id = _brief_id;
query = briefs_table.update().where(briefs_table.c.id==brief_id).values(data=brief_data)
conn.execute(query)
def downgrade():
pass
|
<commit_before><commit_msg>Remove start dates for the second time from draft dos2 briefs
This is identical to the previous migration but will be run again
to cover any draft briefs with invalid dates that could have
appeared during the previous API rollout process (after the previous
migration but before the code propagated fully to the ec2 instances).
https://trello.com/c/BA5KSAvm/40-delete-start-dates-on-draft-requirements<commit_after>
|
"""Remove dates from draft dos2 briefs.
This is identical to the previous migration but will be run again to cover any draft briefs with invalid
dates that could have appeared during the previous API rollout process (after the previous migration but before
the code propagated fully to the ec2 instances).
Revision ID: 880
Revises: 870
Create Date: 2016-04-07
"""
# revision identifiers, used by Alembic.
revision = '880'
down_revision = '870'
from alembic import op
import sqlalchemy as sa
frameworks_table = sa.Table(
'frameworks',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('slug', sa.String, nullable=False, unique=True, index=True)
)
briefs_table = sa.Table(
'briefs',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('framework_id', sa.Integer, nullable=False),
sa.Column('published_at', sa.DateTime, nullable=True),
sa.Column('data', sa.JSON, nullable=True)
)
def upgrade():
"""Remove question and answer for startDate from briefs.data for draft dos2 briefs."""
conn = op.get_bind()
# SELECT id, data
# FROM briefs JOIN frameworks ON briefs.framework_id = frameworks.id
# WHERE frameworks.slug = 'digital-outcomes-and-specialists-2' AND briefs.published_at IS null;
query = briefs_table.join(
frameworks_table,
briefs_table.c.framework_id == frameworks_table.c.id
).select(
sa.and_(
frameworks_table.c.slug == 'digital-outcomes-and-specialists-2',
briefs_table.c.published_at == sa.null()
)
).with_only_columns(
(
briefs_table.c.id,
briefs_table.c.data
)
)
results = conn.execute(query).fetchall()
for brief_id, brief_data in results:
if brief_data.pop('startDate', None) is not None:
# UPDATE briefs SET data = _brief_data WHERE id = _brief_id;
query = briefs_table.update().where(briefs_table.c.id==brief_id).values(data=brief_data)
conn.execute(query)
def downgrade():
pass
|
Remove start dates for the second time from draft dos2 briefs
This is identical to the previous migration but will be run again
to cover any draft briefs with invalid dates that could have
appeared during the previous API rollout process (after the previous
migration but before the code propagated fully to the ec2 instances).
https://trello.com/c/BA5KSAvm/40-delete-start-dates-on-draft-requirements"""Remove dates from draft dos2 briefs.
This is identical to the previous migration but will be run again to cover any draft briefs with invalid
dates that could have appeared during the previous API rollout process (after the previous migration but before
the code propagated fully to the ec2 instances).
Revision ID: 880
Revises: 870
Create Date: 2016-04-07
"""
# revision identifiers, used by Alembic.
revision = '880'
down_revision = '870'
from alembic import op
import sqlalchemy as sa
frameworks_table = sa.Table(
'frameworks',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('slug', sa.String, nullable=False, unique=True, index=True)
)
briefs_table = sa.Table(
'briefs',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('framework_id', sa.Integer, nullable=False),
sa.Column('published_at', sa.DateTime, nullable=True),
sa.Column('data', sa.JSON, nullable=True)
)
def upgrade():
"""Remove question and answer for startDate from briefs.data for draft dos2 briefs."""
conn = op.get_bind()
# SELECT id, data
# FROM briefs JOIN frameworks ON briefs.framework_id = frameworks.id
# WHERE frameworks.slug = 'digital-outcomes-and-specialists-2' AND briefs.published_at IS null;
query = briefs_table.join(
frameworks_table,
briefs_table.c.framework_id == frameworks_table.c.id
).select(
sa.and_(
frameworks_table.c.slug == 'digital-outcomes-and-specialists-2',
briefs_table.c.published_at == sa.null()
)
).with_only_columns(
(
briefs_table.c.id,
briefs_table.c.data
)
)
results = conn.execute(query).fetchall()
for brief_id, brief_data in results:
if brief_data.pop('startDate', None) is not None:
# UPDATE briefs SET data = _brief_data WHERE id = _brief_id;
query = briefs_table.update().where(briefs_table.c.id==brief_id).values(data=brief_data)
conn.execute(query)
def downgrade():
pass
|
<commit_before><commit_msg>Remove start dates for the second time from draft dos2 briefs
This is identical to the previous migration but will be run again
to cover any draft briefs with invalid dates that could have
appeared during the previous API rollout process (after the previous
migration but before the code propagated fully to the ec2 instances).
https://trello.com/c/BA5KSAvm/40-delete-start-dates-on-draft-requirements<commit_after>"""Remove dates from draft dos2 briefs.
This is identical to the previous migration but will be run again to cover any draft briefs with invalid
dates that could have appeared during the previous API rollout process (after the previous migration but before
the code propagated fully to the ec2 instances).
Revision ID: 880
Revises: 870
Create Date: 2016-04-07
"""
# revision identifiers, used by Alembic.
revision = '880'
down_revision = '870'
from alembic import op
import sqlalchemy as sa
frameworks_table = sa.Table(
'frameworks',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('slug', sa.String, nullable=False, unique=True, index=True)
)
briefs_table = sa.Table(
'briefs',
sa.MetaData(),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('framework_id', sa.Integer, nullable=False),
sa.Column('published_at', sa.DateTime, nullable=True),
sa.Column('data', sa.JSON, nullable=True)
)
def upgrade():
"""Remove question and answer for startDate from briefs.data for draft dos2 briefs."""
conn = op.get_bind()
# SELECT id, data
# FROM briefs JOIN frameworks ON briefs.framework_id = frameworks.id
# WHERE frameworks.slug = 'digital-outcomes-and-specialists-2' AND briefs.published_at IS null;
query = briefs_table.join(
frameworks_table,
briefs_table.c.framework_id == frameworks_table.c.id
).select(
sa.and_(
frameworks_table.c.slug == 'digital-outcomes-and-specialists-2',
briefs_table.c.published_at == sa.null()
)
).with_only_columns(
(
briefs_table.c.id,
briefs_table.c.data
)
)
results = conn.execute(query).fetchall()
for brief_id, brief_data in results:
if brief_data.pop('startDate', None) is not None:
# UPDATE briefs SET data = _brief_data WHERE id = _brief_id;
query = briefs_table.update().where(briefs_table.c.id==brief_id).values(data=brief_data)
conn.execute(query)
def downgrade():
pass
|
|
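The heart of the upgrade loop above is popping startDate out of each draft brief's JSON payload. The same transform in isolation, with an invented payload and no database involved, looks like this:

# Standalone illustration of the data change performed per draft brief.
brief_data = {'title': 'Example brief', 'startDate': '2016-05-01'}
if brief_data.pop('startDate', None) is not None:
    pass  # in the migration, this branch issues the UPDATE for that brief
print(brief_data)   # {'title': 'Example brief'}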
3ebae0f57ae3396213eb28b6fc7a23ff3e3c4980
|
uml-to-cpp.py
|
uml-to-cpp.py
|
# Copyright (C) 2017 Bran Seals. All rights reserved.
# Created: 2017-06-05
print("== UML to CPP ==")
print("Create or modify C++ header and implementation files by plaintext UML.")
#print("Enter a UML filename: ") # file import currently disabled
# check if file isn't too bonkers
#uml = [] # pull UML into memory as string list
# check if file is properly formatted
classList = [] # list of classes that will be created, along with members
noteList = [] # if weird things happen, this list will show potential errors
# will be displayed after files are created for user info
# while uml list items exist:
# get class name
# while } not reached:
# if +, put into hppPub
# if -, put into hppPriv
# if neither, put into hppPriv and add message to noteList
# use these to create UmlClass object and append to classList
# for each in classList:
# build hpp list using hpp, hppPublic, hppPrivate
# checkForLibs()
# while hpp list item exists:
# if isFunction, append to functions list
# while functions list item exists:
# format function and append to cpp list
# create name.hpp file and write using hpp list
# create name.cpp file and write using cpp list
# remove object from classList?
class UmlClass:
def __init__(self, className, hppPub, hppPriv):
self.name = className
self.hppPublic = list(hppPub)
self.hppPrivate = list(hppPriv)
functions = [] # list of functions used to build cpp file
hpp = [] # will contain final hpp template, built from hppPub, hppPriv
cpp = [] # same as hpp, but with implementation file
#def isFunction(): # looks for function syntax
# used when creating cpp file from hpp list
#def checkForLibs(): # include libraries for data types that need them
#def formatFunc(): # formats function from hpp to cpp style
# also takes into account return type and variable names
|
Create file and add pseudocode
|
Create file and add pseudocode
|
Python
|
mit
|
BranSeals/uml-to-cpp
|
Create file and add pseudocode
|
# Copyright (C) 2017 Bran Seals. All rights reserved.
# Created: 2017-06-05
print("== UML to CPP ==")
print("Create or modify C++ header and implementation files by plaintext UML.")
#print("Enter a UML filename: ") # file import currently disabled
# check if file isn't too bonkers
#uml = [] # pull UML into memory as string list
# check if file is properly formatted
classList = [] # list of classes that will be created, along with members
noteList = [] # if weird things happen, this list will show potential errors
# will be displayed after files are created for user info
# while uml list items exist:
# get class name
# while } not reached:
# if +, put into hppPub
# if -, put into hppPriv
# if neither, put into hppPriv and add message to noteList
# use these to create UmlClass object and append to classList
# for each in classList:
# build hpp list using hpp, hppPublic, hppPrivate
# checkForLibs()
# while hpp list item exists:
# if isFunction, append to functions list
# while functions list item exists:
# format function and append to cpp list
# create name.hpp file and write using hpp list
# create name.cpp file and write using cpp list
# remove object from classList?
class UmlClass:
def __init__(self, className, hppPub, hppPriv):
self.name = className
self.hppPublic = list(hppPub)
self.hppPrivate = list(hppPriv)
functions = [] # list of functions used to build cpp file
hpp = [] # will contain final hpp template, built from hppPub, hppPriv
cpp = [] # same as hpp, but with implementation file
#def isFunction(): # looks for function syntax
# used when creating cpp file from hpp list
#def checkForLibs(): # include libraries for data types that need them
#def formatFunc(): # formats function from hpp to cpp style
# also takes into account return type and variable names
|
<commit_before><commit_msg>Create file and add pseudocode<commit_after>
|
# Copyright (C) 2017 Bran Seals. All rights reserved.
# Created: 2017-06-05
print("== UML to CPP ==")
print("Create or modify C++ header and implementation files by plaintext UML.")
#print("Enter a UML filename: ") # file import currently disabled
# check if file isn't too bonkers
#uml = [] # pull UML into memory as string list
# check if file is properly formatted
classList = [] # list of classes that will be created, along with members
noteList = [] # if weird things happen, this list will show potential errors
# will be displayed after files are created for user info
# while uml list items exist:
# get class name
# while } not reached:
# if +, put into hppPub
# if -, put into hppPriv
# if neither, put into hppPriv and add message to noteList
# use these to create UmlClass object and append to classList
# for each in classList:
# build hpp list using hpp, hppPublic, hppPrivate
# checkForLibs()
# while hpp list item exists:
# if isFunction, append to functions list
# while functions list item exists:
# format function and append to cpp list
# create name.hpp file and write using hpp list
# create name.cpp file and write using cpp list
# remove object from classList?
class UmlClass:
def __init__(self, className, hppPub, hppPriv):
self.name = className
self.hppPublic = list(hppPub)
self.hppPrivate = list(hppPriv)
functions = [] # list of functions used to build cpp file
hpp = [] # will contain final hpp template, built from hppPub, hppPriv
cpp = [] # same as hpp, but with implementation file
#def isFunction(): # looks for function syntax
# used when creating cpp file from hpp list
#def checkForLibs(): # include libraries for data types that need them
#def formatFunc(): # formats function from hpp to cpp style
# also takes into account return type and variable names
|
Create file and add pseudocode# Copyright (C) 2017 Bran Seals. All rights reserved.
# Created: 2017-06-05
print("== UML to CPP ==")
print("Create or modify C++ header and implementation files by plaintext UML.")
#print("Enter a UML filename: ") # file import currently disabled
# check if file isn't too bonkers
#uml = [] # pull UML into memory as string list
# check if file is properly formatted
classList = [] # list of classes that will be created, along with members
noteList = [] # if weird things happen, this list will show potential errors
# will be displayed after files are created for user info
# while uml list items exist:
# get class name
# while } not reached:
# if +, put into hppPub
# if -, put into hppPriv
# if neither, put into hppPriv and add message to noteList
# use these to create UmlClass object and append to classList
# for each in classList:
# build hpp list using hpp, hppPublic, hppPrivate
# checkForLibs()
# while hpp list item exists:
# if isFunction, append to functions list
# while functions list item exists:
# format function and append to cpp list
# create name.hpp file and write using hpp list
# create name.cpp file and write using cpp list
# remove object from classList?
class UmlClass:
def __init__(self, className, hppPub, hppPriv):
self.name = className
self.hppPublic = list(hppPub)
self.hppPrivate = list(hppPriv)
functions = [] # list of functions used to build cpp file
hpp = [] # will contain final hpp template, built from hppPub, hppPriv
cpp = [] # same as hpp, but with implementation file
#def isFunction(): # looks for function syntax
# used when creating cpp file from hpp list
#def checkForLibs(): # include libraries for data types that need them
#def formatFunc(): # formats function from hpp to cpp style
# also takes into account return type and variable names
|
<commit_before><commit_msg>Create file and add pseudocode<commit_after># Copyright (C) 2017 Bran Seals. All rights reserved.
# Created: 2017-06-05
print("== UML to CPP ==")
print("Create or modify C++ header and implementation files by plaintext UML.")
#print("Enter a UML filename: ") # file import currently disabled
# check if file isn't too bonkers
#uml = [] # pull UML into memory as string list
# check if file is properly formatted
classList = [] # list of classes that will be created, along with members
noteList = [] # if weird things happen, this list will show potential errors
# will be displayed after files are created for user info
# while uml list items exist:
# get class name
# while } not reached:
# if +, put into hppPub
# if -, put into hppPriv
# if neither, put into hppPriv and add message to noteList
# use these to create UmlClass object and append to classList
# for each in classList:
# build hpp list using hpp, hppPublic, hppPrivate
# checkForLibs()
# while hpp list item exists:
# if isFunction, append to functions list
# while functions list item exists:
# format function and append to cpp list
# create name.hpp file and write using hpp list
# create name.cpp file and write using cpp list
# remove object from classList?
class UmlClass:
def __init__(self, className, hppPub, hppPriv):
self.name = className
self.hppPublic = list(hppPub)
self.hppPrivate = list(hppPriv)
functions = [] # list of functions used to build cpp file
hpp = [] # will contain final hpp template, built from hppPub, hppPriv
cpp = [] # same as hpp, but with implementation file
#def isFunction(): # looks for function syntax
# used when creating cpp file from hpp list
#def checkForLibs(): # include libraries for data types that need them
#def formatFunc(): # formats function from hpp to cpp style
# also takes into account return type and variable names
|
|
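The comment block above describes the intended parsing loop: members prefixed with + go public, - go private, and anything unmarked goes private with a note. A minimal, hypothetical rendering of that loop, assuming a UML format of "ClassName {" followed by one member per line and a closing "}" (neither the function name nor the format is from the commit):

# Hypothetical parse loop for the pseudocode above; uses the UmlClass shell.
def parse_uml(lines, note_list):
    classes = []
    i = 0
    while i < len(lines):
        header = lines[i].strip()
        if header.endswith('{'):
            name = header[:-1].strip()
            pub, priv = [], []
            i += 1
            while i < len(lines) and lines[i].strip() != '}':
                member = lines[i].strip()
                if member.startswith('+'):
                    pub.append(member[1:].strip())
                elif member.startswith('-'):
                    priv.append(member[1:].strip())
                else:
                    priv.append(member)
                    note_list.append('No visibility given for "%s"; assumed private.' % member)
                i += 1
            classes.append(UmlClass(name, pub, priv))
        i += 1
    return classes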
e2ed635fb3289a5b45f5f15cd1eb543d87fb93d7
|
wafer/talks/tests/test_review_views.py
|
wafer/talks/tests/test_review_views.py
|
"""Tests for wafer.talk review form behaviour."""
from django.test import Client, TestCase
from django.urls import reverse
from reversion import revisions
from reversion.models import Version
from wafer.talks.models import (SUBMITTED, UNDER_CONSIDERATION,
ReviewAspect, Review)
from wafer.talks.forms import ReviewForm, make_aspect_key
from wafer.tests.utils import create_user
from wafer.talks.tests.fixtures import create_talk
class ReviewFormTests(TestCase):
def setUp(self):
self.reviewer_a = create_user('reviewer_a', perms=('add_review',))
self.talk_a = create_talk('Talk A', SUBMITTED, "author_a")
with revisions.create_revision():
# Ensure we have an initial revision
self.talk_a.save()
self.aspect_1 = ReviewAspect.objects.create(name='General')
self.aspect_2 = ReviewAspect.objects.create(name='Other')
self.client = Client()
def test_review_submission(self):
"""Test that submitting a review works"""
self.client.login(username='reviewer_a', password='reviewer_a_password')
self.assertTrue(Version.objects.get_for_object(self.talk_a), 1)
response = self.client.post(reverse('wafer_talk_review', kwargs={'pk': self.talk_a.pk}),
data={'notes': 'Review notes',
make_aspect_key(self.aspect_1): '1',
make_aspect_key(self.aspect_2): '2'})
self.assertEqual(response.status_code, 302)
review = Review.objects.get(talk=self.talk_a, reviewer=self.reviewer_a)
self.assertEqual(review.avg_score, 1.5)
self.talk_a.refresh_from_db()
self.assertEqual(self.talk_a.status, UNDER_CONSIDERATION)
self.assertTrue(Version.objects.get_for_object(self.talk_a), 2)
self.assertTrue(review.is_current())
|
Add test for posting a review through the view
|
Add test for posting a review through the view
|
Python
|
isc
|
CTPUG/wafer,CTPUG/wafer,CTPUG/wafer,CTPUG/wafer
|
Add test for posting a review through the view
|
"""Tests for wafer.talk review form behaviour."""
from django.test import Client, TestCase
from django.urls import reverse
from reversion import revisions
from reversion.models import Version
from wafer.talks.models import (SUBMITTED, UNDER_CONSIDERATION,
ReviewAspect, Review)
from wafer.talks.forms import ReviewForm, make_aspect_key
from wafer.tests.utils import create_user
from wafer.talks.tests.fixtures import create_talk
class ReviewFormTests(TestCase):
def setUp(self):
self.reviewer_a = create_user('reviewer_a', perms=('add_review',))
self.talk_a = create_talk('Talk A', SUBMITTED, "author_a")
with revisions.create_revision():
# Ensure we have an initial revision
self.talk_a.save()
self.aspect_1 = ReviewAspect.objects.create(name='General')
self.aspect_2 = ReviewAspect.objects.create(name='Other')
self.client = Client()
def test_review_submission(self):
"""Test that submitting a review works"""
self.client.login(username='reviewer_a', password='reviewer_a_password')
self.assertTrue(Version.objects.get_for_object(self.talk_a), 1)
response = self.client.post(reverse('wafer_talk_review', kwargs={'pk': self.talk_a.pk}),
data={'notes': 'Review notes',
make_aspect_key(self.aspect_1): '1',
make_aspect_key(self.aspect_2): '2'})
self.assertEqual(response.status_code, 302)
review = Review.objects.get(talk=self.talk_a, reviewer=self.reviewer_a)
self.assertEqual(review.avg_score, 1.5)
self.talk_a.refresh_from_db()
self.assertEqual(self.talk_a.status, UNDER_CONSIDERATION)
self.assertTrue(Version.objects.get_for_object(self.talk_a), 2)
self.assertTrue(review.is_current())
|
<commit_before><commit_msg>Add test for posting a review through the view<commit_after>
|
"""Tests for wafer.talk review form behaviour."""
from django.test import Client, TestCase
from django.urls import reverse
from reversion import revisions
from reversion.models import Version
from wafer.talks.models import (SUBMITTED, UNDER_CONSIDERATION,
ReviewAspect, Review)
from wafer.talks.forms import ReviewForm, make_aspect_key
from wafer.tests.utils import create_user
from wafer.talks.tests.fixtures import create_talk
class ReviewFormTests(TestCase):
def setUp(self):
self.reviewer_a = create_user('reviewer_a', perms=('add_review',))
self.talk_a = create_talk('Talk A', SUBMITTED, "author_a")
with revisions.create_revision():
# Ensure we have an initial revision
self.talk_a.save()
self.aspect_1 = ReviewAspect.objects.create(name='General')
self.aspect_2 = ReviewAspect.objects.create(name='Other')
self.client = Client()
def test_review_submission(self):
"""Test that submitting a review works"""
self.client.login(username='reviewer_a', password='reviewer_a_password')
self.assertTrue(Version.objects.get_for_object(self.talk_a), 1)
response = self.client.post(reverse('wafer_talk_review', kwargs={'pk': self.talk_a.pk}),
data={'notes': 'Review notes',
make_aspect_key(self.aspect_1): '1',
make_aspect_key(self.aspect_2): '2'})
self.assertEqual(response.status_code, 302)
review = Review.objects.get(talk=self.talk_a, reviewer=self.reviewer_a)
self.assertEqual(review.avg_score, 1.5)
self.talk_a.refresh_from_db()
self.assertEqual(self.talk_a.status, UNDER_CONSIDERATION)
self.assertTrue(Version.objects.get_for_object(self.talk_a), 2)
self.assertTrue(review.is_current())
|
Add test for posting a review through the view"""Tests for wafer.talk review form behaviour."""
from django.test import Client, TestCase
from django.urls import reverse
from reversion import revisions
from reversion.models import Version
from wafer.talks.models import (SUBMITTED, UNDER_CONSIDERATION,
ReviewAspect, Review)
from wafer.talks.forms import ReviewForm, make_aspect_key
from wafer.tests.utils import create_user
from wafer.talks.tests.fixtures import create_talk
class ReviewFormTests(TestCase):
def setUp(self):
self.reviewer_a = create_user('reviewer_a', perms=('add_review',))
self.talk_a = create_talk('Talk A', SUBMITTED, "author_a")
with revisions.create_revision():
# Ensure we have an initial revision
self.talk_a.save()
self.aspect_1 = ReviewAspect.objects.create(name='General')
self.aspect_2 = ReviewAspect.objects.create(name='Other')
self.client = Client()
def test_review_submission(self):
"""Test that submitting a review works"""
self.client.login(username='reviewer_a', password='reviewer_a_password')
self.assertTrue(Version.objects.get_for_object(self.talk_a), 1)
response = self.client.post(reverse('wafer_talk_review', kwargs={'pk': self.talk_a.pk}),
data={'notes': 'Review notes',
make_aspect_key(self.aspect_1): '1',
make_aspect_key(self.aspect_2): '2'})
self.assertEqual(response.status_code, 302)
review = Review.objects.get(talk=self.talk_a, reviewer=self.reviewer_a)
self.assertEqual(review.avg_score, 1.5)
self.talk_a.refresh_from_db()
self.assertEqual(self.talk_a.status, UNDER_CONSIDERATION)
self.assertTrue(Version.objects.get_for_object(self.talk_a), 2)
self.assertTrue(review.is_current())
|
<commit_before><commit_msg>Add test for posting a review through the view<commit_after>"""Tests for wafer.talk review form behaviour."""
from django.test import Client, TestCase
from django.urls import reverse
from reversion import revisions
from reversion.models import Version
from wafer.talks.models import (SUBMITTED, UNDER_CONSIDERATION,
ReviewAspect, Review)
from wafer.talks.forms import ReviewForm, make_aspect_key
from wafer.tests.utils import create_user
from wafer.talks.tests.fixtures import create_talk
class ReviewFormTests(TestCase):
def setUp(self):
self.reviewer_a = create_user('reviewer_a', perms=('add_review',))
self.talk_a = create_talk('Talk A', SUBMITTED, "author_a")
with revisions.create_revision():
# Ensure we have an initial revision
self.talk_a.save()
self.aspect_1 = ReviewAspect.objects.create(name='General')
self.aspect_2 = ReviewAspect.objects.create(name='Other')
self.client = Client()
def test_review_submission(self):
"""Test that submitting a review works"""
self.client.login(username='reviewer_a', password='reviewer_a_password')
self.assertTrue(Version.objects.get_for_object(self.talk_a), 1)
response = self.client.post(reverse('wafer_talk_review', kwargs={'pk': self.talk_a.pk}),
data={'notes': 'Review notes',
make_aspect_key(self.aspect_1): '1',
make_aspect_key(self.aspect_2): '2'})
self.assertEqual(response.status_code, 302)
review = Review.objects.get(talk=self.talk_a, reviewer=self.reviewer_a)
self.assertEqual(review.avg_score, 1.5)
self.talk_a.refresh_from_db()
self.assertEqual(self.talk_a.status, UNDER_CONSIDERATION)
self.assertTrue(Version.objects.get_for_object(self.talk_a), 2)
self.assertTrue(review.is_current())
|
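A note on the expected score in the record above: the form posts per-aspect scores of 1 and 2, and the test then asserts review.avg_score == 1.5. A minimal sketch of that arithmetic, independent of wafer's actual Review implementation, is:

def average_score(scores):
    """Plain average of the posted per-aspect scores (illustrative only)."""
    return sum(scores) / len(scores)

assert average_score([1, 2]) == 1.5  # matches the avg_score assertion in the test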
|
466410249867b3eadbe5e2b59c46c95ecd288c6c
|
python_scripts/solr_query_fetch_all.py
|
python_scripts/solr_query_fetch_all.py
|
#!/usr/bin/python
import requests
import ipdb
import time
import csv
import sys
import pysolr
def fetch_all( solr, query ) :
documents = []
num_matching_documents = solr.search( query ).hits
start = 0
rows = num_matching_documents
sys.stderr.write( ' starting fetch for ' + query )
while ( len( documents ) < num_matching_documents ) :
results = solr.search( query, **{
'start': start,
'rows': rows,
# 'fl' : 'media_id',
})
documents.extend( results.docs )
start += rows
assert len( documents ) <= num_matching_documents
assert len( documents ) == num_matching_documents
return documents
solr = pysolr.Solr('http://localhost:8983/solr/')
queries = [ '*:*',
]
for query in queries:
print query
results = fetch_all( solr, query )
print "got " + query
print results
|
Add script for word counts
|
Add script for word counts
|
Python
|
agpl-3.0
|
berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud
|
Add script for word counts
|
#!/usr/bin/python
import requests
import ipdb
import time
import csv
import sys
import pysolr
def fetch_all( solr, query ) :
documents = []
num_matching_documents = solr.search( query ).hits
start = 0
rows = num_matching_documents
sys.stderr.write( ' starting fetch for ' + query )
while ( len( documents ) < num_matching_documents ) :
results = solr.search( query, **{
'start': start,
'rows': rows,
# 'fl' : 'media_id',
})
documents.extend( results.docs )
start += rows
assert len( documents ) <= num_matching_documents
assert len( documents ) == num_matching_documents
return documents
solr = pysolr.Solr('http://localhost:8983/solr/')
queries = [ '*:*',
]
for query in queries:
print query
results = fetch_all( solr, query )
print "got " + query
print results
|
<commit_before><commit_msg>Add script for word counts<commit_after>
|
#!/usr/bin/python
import requests
import ipdb
import time
import csv
import sys
import pysolr
def fetch_all( solr, query ) :
documents = []
num_matching_documents = solr.search( query ).hits
start = 0
rows = num_matching_documents
sys.stderr.write( ' starting fetch for ' + query )
while ( len( documents ) < num_matching_documents ) :
results = solr.search( query, **{
'start': start,
'rows': rows,
# 'fl' : 'media_id',
})
documents.extend( results.docs )
start += rows
assert len( documents ) <= num_matching_documents
assert len( documents ) == num_matching_documents
return documents
solr = pysolr.Solr('http://localhost:8983/solr/')
queries = [ '*:*',
]
for query in queries:
print query
results = fetch_all( solr, query )
print "got " + query
print results
|
Add script for word counts
#!/usr/bin/python
import requests
import ipdb
import time
import csv
import sys
import pysolr
def fetch_all( solr, query ) :
documents = []
num_matching_documents = solr.search( query ).hits
start = 0
rows = num_matching_documents
sys.stderr.write( ' starting fetch for ' + query )
while ( len( documents ) < num_matching_documents ) :
results = solr.search( query, **{
'start': start,
'rows': rows,
# 'fl' : 'media_id',
})
documents.extend( results.docs )
start += rows
assert len( documents ) <= num_matching_documents
assert len( documents ) == num_matching_documents
return documents
solr = pysolr.Solr('http://localhost:8983/solr/')
queries = [ '*:*',
]
for query in queries:
print query
results = fetch_all( solr, query )
print "got " + query
print results
|
<commit_before><commit_msg>Add script for word counts<commit_after>#!/usr/bin/python
import requests
import ipdb
import time
import csv
import sys
import pysolr
def fetch_all( solr, query ) :
documents = []
num_matching_documents = solr.search( query ).hits
start = 0
rows = num_matching_documents
sys.stderr.write( ' starting fetch for ' + query )
while ( len( documents ) < num_matching_documents ) :
results = solr.search( query, **{
'start': start,
'rows': rows,
# 'fl' : 'media_id',
})
documents.extend( results.docs )
start += rows
assert len( documents ) <= num_matching_documents
assert len( documents ) == num_matching_documents
return documents
solr = pysolr.Solr('http://localhost:8983/solr/')
queries = [ '*:*',
]
for query in queries:
print query
results = fetch_all( solr, query )
print "got " + query
print results
|
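fetch_all above requests every matching document in a single Solr call, since rows is set to the full hit count. Below is a sketch of the same loop with a fixed page size; the batch size of 1000 is an assumption, not something the original script uses:

import pysolr

def fetch_all_batched(solr, query, batch_size=1000):
    """Collect all matching documents in fixed-size pages rather than one request."""
    documents = []
    num_matching_documents = solr.search(query).hits
    start = 0
    while start < num_matching_documents:
        results = solr.search(query, **{'start': start, 'rows': batch_size})
        documents.extend(results.docs)
        start += batch_size
    return documents

# Usage mirrors the original script:
# solr = pysolr.Solr('http://localhost:8983/solr/')
# docs = fetch_all_batched(solr, '*:*')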
|
7182af317116db7eb3f7a278b3487ad91a3b3331
|
high-res-slider.py
|
high-res-slider.py
|
import functools
import numpy as np
import dask.array as da
from magicgui.widgets import Slider, Container
import napari
# stack = ... # your dask array
# stack2 = stack[::2, ::2, ::2]
# stack4 = stack2[::2, ::2, ::2]
# 👆 quick and easy multiscale pyramid, don't do this really
# see https://github.com/dask/dask-image/issues/136
# for better ways
# and, specifically, stack4 will be small but will still need
# to access full data. You should save all data sizes as
# their own arrays on disk and load those. I recommend
# using dask.array.Array.to_zarr.
# You can also read about NGFF:
# https://ngff.openmicroscopy.org/latest/
# example with some example data from Liu et al, Science, 2018
stack, stack2, stack4 = [
da.from_zarr(f'/Users/jni/data/gokul-lls/{i}.zarr')[0]
for i in range(3)
]
# a list of arrays of decreasing size is interpreted as
# a multiscale dataset by napari
multiscale_data = [stack, stack2, stack4]
viewer = napari.Viewer(ndisplay=3)
multiscale_layer = viewer.add_image(
multiscale_data,
colormap='magenta',
scale=[3, 1, 1],
)
crop_sizes = (30, 256, 256)
cropz, cropy, cropx = crop_sizes
shapez, shapey, shapex = stack.shape
ends = np.asarray(stack.shape) - np.asarray(crop_sizes) + 1
stepsizes = ends // 100
highres_crop_layer = viewer.add_image(
stack[:cropz, :cropy, :cropx],
name='cropped',
blending='additive',
colormap='green',
scale=multiscale_layer.scale,
)
def set_slice(axis, value):
idx = int(value)
scale = np.asarray(highres_crop_layer.scale)
translate = np.asarray(highres_crop_layer.translate)
izyx = translate // scale
izyx[axis] = idx
i, j, k = izyx
highres_crop_layer.data = stack[i:i + cropz, j:j + cropy, k:k + cropx]
highres_crop_layer.translate = scale * izyx
highres_crop_layer.refresh()
sliders = [
Slider(name=axis, min=0, max=end, step=step)
for axis, end, step in zip('zyx', ends, stepsizes)
]
for axis, slider in enumerate(sliders):
slider.changed.connect(
lambda event, axis=axis: set_slice(axis, event.value)
)
container_widget = Container(layout='vertical')
container_widget.extend(sliders)
viewer.window.add_dock_widget(container_widget, area='right')
napari.run()
|
Add example for a clunky 3D high resolution loupe for napari
|
Add example for a clunky 3D high resolution loupe for napari
|
Python
|
bsd-3-clause
|
jni/useful-histories
|
Add example for a clunky 3D high resolution loupe for napari
|
import functools
import numpy as np
import dask.array as da
from magicgui.widgets import Slider, Container
import napari
# stack = ... # your dask array
# stack2 = stack[::2, ::2, ::2]
# stack4 = stack2[::2, ::2, ::2]
# 👆 quick and easy multiscale pyramid, don't do this really
# see https://github.com/dask/dask-image/issues/136
# for better ways
# and, specifically, stack4 will be small but will still need
# to access full data. You should save all data sizes as
# their own arrays on disk and load those. I recommend
# using dask.array.Array.to_zarr.
# You can also read about NGFF:
# https://ngff.openmicroscopy.org/latest/
# example with some example data from Liu et al, Science, 2018
stack, stack2, stack4 = [
da.from_zarr(f'/Users/jni/data/gokul-lls/{i}.zarr')[0]
for i in range(3)
]
# a list of arrays of decreasing size is interpreted as
# a multiscale dataset by napari
multiscale_data = [stack, stack2, stack4]
viewer = napari.Viewer(ndisplay=3)
multiscale_layer = viewer.add_image(
multiscale_data,
colormap='magenta',
scale=[3, 1, 1],
)
crop_sizes = (30, 256, 256)
cropz, cropy, cropx = crop_sizes
shapez, shapey, shapex = stack.shape
ends = np.asarray(stack.shape) - np.asarray(crop_sizes) + 1
stepsizes = ends // 100
highres_crop_layer = viewer.add_image(
stack[:cropz, :cropy, :cropx],
name='cropped',
blending='additive',
colormap='green',
scale=multiscale_layer.scale,
)
def set_slice(axis, value):
idx = int(value)
scale = np.asarray(highres_crop_layer.scale)
translate = np.asarray(highres_crop_layer.translate)
izyx = translate // scale
izyx[axis] = idx
i, j, k = izyx
highres_crop_layer.data = stack[i:i + cropz, j:j + cropy, k:k + cropx]
highres_crop_layer.translate = scale * izyx
highres_crop_layer.refresh()
sliders = [
Slider(name=axis, min=0, max=end, step=step)
for axis, end, step in zip('zyx', ends, stepsizes)
]
for axis, slider in enumerate(sliders):
slider.changed.connect(
lambda event, axis=axis: set_slice(axis, event.value)
)
container_widget = Container(layout='vertical')
container_widget.extend(sliders)
viewer.window.add_dock_widget(container_widget, area='right')
napari.run()
|
<commit_before><commit_msg>Add example for a clunky 3D high resolution loupe for napari<commit_after>
|
import functools
import numpy as np
import dask.array as da
from magicgui.widgets import Slider, Container
import napari
# stack = ... # your dask array
# stack2 = stack[::2, ::2, ::2]
# stack4 = stack2[::2, ::2, ::2]
# 👆 quick and easy multiscale pyramid, don't do this really
# see https://github.com/dask/dask-image/issues/136
# for better ways
# and, specifically, stack4 will be small but will still need
# to access full data. You should save all data sizes as
# their own arrays on disk and load those. I recommend
# using dask.array.Array.to_zarr.
# You can also read about NGFF:
# https://ngff.openmicroscopy.org/latest/
# example with some example data from Liu et al, Science, 2018
stack, stack2, stack4 = [
da.from_zarr(f'/Users/jni/data/gokul-lls/{i}.zarr')[0]
for i in range(3)
]
# a list of arrays of decreasing size is interpreted as
# a multiscale dataset by napari
multiscale_data = [stack, stack2, stack4]
viewer = napari.Viewer(ndisplay=3)
multiscale_layer = viewer.add_image(
multiscale_data,
colormap='magenta',
scale=[3, 1, 1],
)
crop_sizes = (30, 256, 256)
cropz, cropy, cropx = crop_sizes
shapez, shapey, shapex = stack.shape
ends = np.asarray(stack.shape) - np.asarray(crop_sizes) + 1
stepsizes = ends // 100
highres_crop_layer = viewer.add_image(
stack[:cropz, :cropy, :cropx],
name='cropped',
blending='additive',
colormap='green',
scale=multiscale_layer.scale,
)
def set_slice(axis, value):
idx = int(value)
scale = np.asarray(highres_crop_layer.scale)
translate = np.asarray(highres_crop_layer.translate)
izyx = translate // scale
izyx[axis] = idx
i, j, k = izyx
highres_crop_layer.data = stack[i:i + cropz, j:j + cropy, k:k + cropx]
highres_crop_layer.translate = scale * izyx
highres_crop_layer.refresh()
sliders = [
Slider(name=axis, min=0, max=end, step=step)
for axis, end, step in zip('zyx', ends, stepsizes)
]
for axis, slider in enumerate(sliders):
slider.changed.connect(
lambda event, axis=axis: set_slice(axis, event.value)
)
container_widget = Container(layout='vertical')
container_widget.extend(sliders)
viewer.window.add_dock_widget(container_widget, area='right')
napari.run()
|
Add example for a clunky 3D high resolution loupe for napari
import functools
import numpy as np
import dask.array as da
from magicgui.widgets import Slider, Container
import napari
# stack = ... # your dask array
# stack2 = stack[::2, ::2, ::2]
# stack4 = stack2[::2, ::2, ::2]
# 👆 quick and easy multiscale pyramid, don't do this really
# see https://github.com/dask/dask-image/issues/136
# for better ways
# and, specifically, stack4 will be small but will still need
# to access full data. You should save all data sizes as
# their own arrays on disk and load those. I recommend
# using dask.array.Array.to_zarr.
# You can also read about NGFF:
# https://ngff.openmicroscopy.org/latest/
# example with some example data from Liu et al, Science, 2018
stack, stack2, stack4 = [
da.from_zarr(f'/Users/jni/data/gokul-lls/{i}.zarr')[0]
for i in range(3)
]
# a list of arrays of decreasing size is interpreted as
# a multiscale dataset by napari
multiscale_data = [stack, stack2, stack4]
viewer = napari.Viewer(ndisplay=3)
multiscale_layer = viewer.add_image(
multiscale_data,
colormap='magenta',
scale=[3, 1, 1],
)
crop_sizes = (30, 256, 256)
cropz, cropy, cropx = crop_sizes
shapez, shapey, shapex = stack.shape
ends = np.asarray(stack.shape) - np.asarray(crop_sizes) + 1
stepsizes = ends // 100
highres_crop_layer = viewer.add_image(
stack[:cropz, :cropy, :cropx],
name='cropped',
blending='additive',
colormap='green',
scale=multiscale_layer.scale,
)
def set_slice(axis, value):
idx = int(value)
scale = np.asarray(highres_crop_layer.scale)
translate = np.asarray(highres_crop_layer.translate)
izyx = translate // scale
izyx[axis] = idx
i, j, k = izyx
highres_crop_layer.data = stack[i:i + cropz, j:j + cropy, k:k + cropx]
highres_crop_layer.translate = scale * izyx
highres_crop_layer.refresh()
sliders = [
Slider(name=axis, min=0, max=end, step=step)
for axis, end, step in zip('zyx', ends, stepsizes)
]
for axis, slider in enumerate(sliders):
slider.changed.connect(
lambda event, axis=axis: set_slice(axis, event.value)
)
container_widget = Container(layout='vertical')
container_widget.extend(sliders)
viewer.window.add_dock_widget(container_widget, area='right')
napari.run()
|
<commit_before><commit_msg>Add example for a clunky 3D high resolution loupe for napari<commit_after>import functools
import numpy as np
import dask.array as da
from magicgui.widgets import Slider, Container
import napari
# stack = ... # your dask array
# stack2 = stack[::2, ::2, ::2]
# stack4 = stack2[::2, ::2, ::2]
# 👆 quick and easy multiscale pyramid, don't do this really
# see https://github.com/dask/dask-image/issues/136
# for better ways
# and, specifically, stack4 will be small but will still need
# to access full data. You should save all data sizes as
# their own arrays on disk and load those. I recommend
# using dask.array.Array.to_zarr.
# You can also read about NGFF:
# https://ngff.openmicroscopy.org/latest/
# example with some example data from Liu et al, Science, 2018
stack, stack2, stack4 = [
da.from_zarr(f'/Users/jni/data/gokul-lls/{i}.zarr')[0]
for i in range(3)
]
# a list of arrays of decreasing size is interpreted as
# a multiscale dataset by napari
multiscale_data = [stack, stack2, stack4]
viewer = napari.Viewer(ndisplay=3)
multiscale_layer = viewer.add_image(
multiscale_data,
colormap='magenta',
scale=[3, 1, 1],
)
crop_sizes = (30, 256, 256)
cropz, cropy, cropx = crop_sizes
shapez, shapey, shapex = stack.shape
ends = np.asarray(stack.shape) - np.asarray(crop_sizes) + 1
stepsizes = ends // 100
highres_crop_layer = viewer.add_image(
stack[:cropz, :cropy, :cropx],
name='cropped',
blending='additive',
colormap='green',
scale=multiscale_layer.scale,
)
def set_slice(axis, value):
idx = int(value)
scale = np.asarray(highres_crop_layer.scale)
translate = np.asarray(highres_crop_layer.translate)
izyx = translate // scale
izyx[axis] = idx
i, j, k = izyx
highres_crop_layer.data = stack[i:i + cropz, j:j + cropy, k:k + cropx]
highres_crop_layer.translate = scale * izyx
highres_crop_layer.refresh()
sliders = [
Slider(name=axis, min=0, max=end, step=step)
for axis, end, step in zip('zyx', ends, stepsizes)
]
for axis, slider in enumerate(sliders):
slider.changed.connect(
lambda event, axis=axis: set_slice(axis, event.value)
)
container_widget = Container(layout='vertical')
container_widget.extend(sliders)
viewer.window.add_dock_widget(container_widget, area='right')
napari.run()
|
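The comments in the loupe example point to dask.array.Array.to_zarr for persisting each pyramid level instead of re-slicing the full-resolution stack at load time. A rough sketch of that preprocessing step follows; the output paths, level count and naive [::2, ::2, ::2] downsampling are illustrative assumptions carried over from the example's own caveats:

import dask.array as da

def save_pyramid(stack, out_prefix, levels=3):
    """Persist a crude multiscale pyramid so each level loads independently."""
    level = stack
    for i in range(levels):
        level.to_zarr(f'{out_prefix}/{i}.zarr', overwrite=True)
        level = level[::2, ::2, ::2]  # same quick-and-dirty downsampling as the example

# Afterwards the viewer code can load the levels the way the example does:
# multiscale_data = [da.from_zarr(f'{out_prefix}/{i}.zarr') for i in range(3)]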
|
72f32099411644a3fed6103430f7dd78fb0929a5
|
konstrukteur/ContentParser.py
|
konstrukteur/ContentParser.py
|
#
# Konstrukteur - Static website generator
# Copyright 2013 Sebastian Fastner
#
import glob, os
from jasy.core import Console
import konstrukteur.Language
import konstrukteur.Util
class ContentParser:
""" Content parser class for Konstrukteur """
def __init__(self, extensions, fixJasyCommands, defaultLanguage):
self.__extensions = extensions
self.__extensionParser = {}
self.__extensionParser["html"] = konstrukteur.HtmlParser
self.__id = 1
self.__commandReplacer = []
self.__fixJasyCommands = fixJasyCommands
self.__languages = {}
self.__defaultLanguage = defaultLanguage
def parse(self, pagesPath, pages, languages):
#pagesPath = os.path.join(self.__contentPath, sourcePath)
Console.info("Parse content files at %s" % pagesPath)
Console.indent()
for extension in self.__extensions:
for filename in glob.iglob(os.path.join(pagesPath, "*.%s" % extension)):
basename = os.path.basename(filename)
Console.debug("Parsing %s" % basename)
page = self.__parseContentFile(filename, extension)
if page:
self.generateFields(page, languages)
pages.append(page)
else:
Console.error("Error parsing %s" % filename)
Console.outdent()
def generateFields(self, page, languages):
for key, value in page.items():
page[key] = self.__fixJasyCommands(value)
if "slug" in page:
page["slug"] =konstrukteur.Util.fixSlug(page["slug"])
else:
page["slug"] = konstrukteur.Util.fixSlug(page["title"])
page["content"] = konstrukteur.Util.fixCoreTemplating(page["content"])
if not "status" in page:
page["status"] = "published"
if not "pos" in page:
page["pos"] = 0
else:
page["pos"] = int(page["pos"])
if not "lang" in page:
page["lang"] = self.__defaultLanguage
if page["lang"] not in languages:
languages.append(page["lang"])
return page
def __parseContentFile(self, filename, extension):
""" Parse single content file """
if not extension in self.__extensionParser:
raise RuntimeError("No content parser for extension %s registered" % extension)
return self.__extensionParser[extension].parse(filename)
|
Add new content parser class (based upon code in Konstruktuer)
|
Add new content parser class (based upon code in Konstruktuer)
|
Python
|
mit
|
fastner/konstrukteur,fastner/konstrukteur,fastner/konstrukteur
|
Add new content parser class (based upon code in Konstruktuer)
|
#
# Konstrukteur - Static website generator
# Copyright 2013 Sebastian Fastner
#
import glob, os
from jasy.core import Console
import konstrukteur.Language
import konstrukteur.Util
class ContentParser:
""" Content parser class for Konstrukteur """
def __init__(self, extensions, fixJasyCommands, defaultLanguage):
self.__extensions = extensions
self.__extensionParser = {}
self.__extensionParser["html"] = konstrukteur.HtmlParser
self.__id = 1
self.__commandReplacer = []
self.__fixJasyCommands = fixJasyCommands
self.__languages = {}
self.__defaultLanguage = defaultLanguage
def parse(self, pagesPath, pages, languages):
#pagesPath = os.path.join(self.__contentPath, sourcePath)
Console.info("Parse content files at %s" % pagesPath)
Console.indent()
for extension in self.__extensions:
for filename in glob.iglob(os.path.join(pagesPath, "*.%s" % extension)):
basename = os.path.basename(filename)
Console.debug("Parsing %s" % basename)
page = self.__parseContentFile(filename, extension)
if page:
self.generateFields(page, languages)
pages.append(page)
else:
Console.error("Error parsing %s" % filename)
Console.outdent()
def generateFields(self, page, languages):
for key, value in page.items():
page[key] = self.__fixJasyCommands(value)
if "slug" in page:
page["slug"] =konstrukteur.Util.fixSlug(page["slug"])
else:
page["slug"] = konstrukteur.Util.fixSlug(page["title"])
page["content"] = konstrukteur.Util.fixCoreTemplating(page["content"])
if not "status" in page:
page["status"] = "published"
if not "pos" in page:
page["pos"] = 0
else:
page["pos"] = int(page["pos"])
if not "lang" in page:
page["lang"] = self.__defaultLanguage
if page["lang"] not in languages:
languages.append(page["lang"])
return page
def __parseContentFile(self, filename, extension):
""" Parse single content file """
if not extension in self.__extensionParser:
raise RuntimeError("No content parser for extension %s registered" % extension)
return self.__extensionParser[extension].parse(filename)
|
<commit_before><commit_msg>Add new content parser class (based upon code in Konstruktuer)<commit_after>
|
#
# Konstrukteur - Static website generator
# Copyright 2013 Sebastian Fastner
#
import glob, os
from jasy.core import Console
import konstrukteur.Language
import konstrukteur.Util
class ContentParser:
""" Content parser class for Konstrukteur """
def __init__(self, extensions, fixJasyCommands, defaultLanguage):
self.__extensions = extensions
self.__extensionParser = {}
self.__extensionParser["html"] = konstrukteur.HtmlParser
self.__id = 1
self.__commandReplacer = []
self.__fixJasyCommands = fixJasyCommands
self.__languages = {}
self.__defaultLanguage = defaultLanguage
def parse(self, pagesPath, pages, languages):
#pagesPath = os.path.join(self.__contentPath, sourcePath)
Console.info("Parse content files at %s" % pagesPath)
Console.indent()
for extension in self.__extensions:
for filename in glob.iglob(os.path.join(pagesPath, "*.%s" % extension)):
basename = os.path.basename(filename)
Console.debug("Parsing %s" % basename)
page = self.__parseContentFile(filename, extension)
if page:
self.generateFields(page, languages)
pages.append(page)
else:
Console.error("Error parsing %s" % filename)
Console.outdent()
def generateFields(self, page, languages):
for key, value in page.items():
page[key] = self.__fixJasyCommands(value)
if "slug" in page:
page["slug"] =konstrukteur.Util.fixSlug(page["slug"])
else:
page["slug"] = konstrukteur.Util.fixSlug(page["title"])
page["content"] = konstrukteur.Util.fixCoreTemplating(page["content"])
if not "status" in page:
page["status"] = "published"
if not "pos" in page:
page["pos"] = 0
else:
page["pos"] = int(page["pos"])
if not "lang" in page:
page["lang"] = self.__defaultLanguage
if page["lang"] not in languages:
languages.append(page["lang"])
return page
def __parseContentFile(self, filename, extension):
""" Parse single content file """
if not extension in self.__extensionParser:
raise RuntimeError("No content parser for extension %s registered" % extension)
return self.__extensionParser[extension].parse(filename)
|
Add new content parser class (based upon code in Konstruktuer)
#
# Konstrukteur - Static website generator
# Copyright 2013 Sebastian Fastner
#
import glob, os
from jasy.core import Console
import konstrukteur.Language
import konstrukteur.Util
class ContentParser:
""" Content parser class for Konstrukteur """
def __init__(self, extensions, fixJasyCommands, defaultLanguage):
self.__extensions = extensions
self.__extensionParser = {}
self.__extensionParser["html"] = konstrukteur.HtmlParser
self.__id = 1
self.__commandReplacer = []
self.__fixJasyCommands = fixJasyCommands
self.__languages = {}
self.__defaultLanguage = defaultLanguage
def parse(self, pagesPath, pages, languages):
#pagesPath = os.path.join(self.__contentPath, sourcePath)
Console.info("Parse content files at %s" % pagesPath)
Console.indent()
for extension in self.__extensions:
for filename in glob.iglob(os.path.join(pagesPath, "*.%s" % extension)):
basename = os.path.basename(filename)
Console.debug("Parsing %s" % basename)
page = self.__parseContentFile(filename, extension)
if page:
self.generateFields(page, languages)
pages.append(page)
else:
Console.error("Error parsing %s" % filename)
Console.outdent()
def generateFields(self, page, languages):
for key, value in page.items():
page[key] = self.__fixJasyCommands(value)
if "slug" in page:
page["slug"] =konstrukteur.Util.fixSlug(page["slug"])
else:
page["slug"] = konstrukteur.Util.fixSlug(page["title"])
page["content"] = konstrukteur.Util.fixCoreTemplating(page["content"])
if not "status" in page:
page["status"] = "published"
if not "pos" in page:
page["pos"] = 0
else:
page["pos"] = int(page["pos"])
if not "lang" in page:
page["lang"] = self.__defaultLanguage
if page["lang"] not in languages:
languages.append(page["lang"])
return page
def __parseContentFile(self, filename, extension):
""" Parse single content file """
if not extension in self.__extensionParser:
raise RuntimeError("No content parser for extension %s registered" % extension)
return self.__extensionParser[extension].parse(filename)
|
<commit_before><commit_msg>Add new content parser class (based upon code in Konstruktuer)<commit_after>#
# Konstrukteur - Static website generator
# Copyright 2013 Sebastian Fastner
#
import glob, os
from jasy.core import Console
import konstrukteur.Language
import konstrukteur.Util
class ContentParser:
""" Content parser class for Konstrukteur """
def __init__(self, extensions, fixJasyCommands, defaultLanguage):
self.__extensions = extensions
self.__extensionParser = {}
self.__extensionParser["html"] = konstrukteur.HtmlParser
self.__id = 1
self.__commandReplacer = []
self.__fixJasyCommands = fixJasyCommands
self.__languages = {}
self.__defaultLanguage = defaultLanguage
def parse(self, pagesPath, pages, languages):
#pagesPath = os.path.join(self.__contentPath, sourcePath)
Console.info("Parse content files at %s" % pagesPath)
Console.indent()
for extension in self.__extensions:
for filename in glob.iglob(os.path.join(pagesPath, "*.%s" % extension)):
basename = os.path.basename(filename)
Console.debug("Parsing %s" % basename)
page = self.__parseContentFile(filename, extension)
if page:
self.generateFields(page, languages)
pages.append(page)
else:
Console.error("Error parsing %s" % filename)
Console.outdent()
def generateFields(self, page, languages):
for key, value in page.items():
page[key] = self.__fixJasyCommands(value)
if "slug" in page:
page["slug"] =konstrukteur.Util.fixSlug(page["slug"])
else:
page["slug"] = konstrukteur.Util.fixSlug(page["title"])
page["content"] = konstrukteur.Util.fixCoreTemplating(page["content"])
if not "status" in page:
page["status"] = "published"
if not "pos" in page:
page["pos"] = 0
else:
page["pos"] = int(page["pos"])
if not "lang" in page:
page["lang"] = self.__defaultLanguage
if page["lang"] not in languages:
languages.append(page["lang"])
return page
def __parseContentFile(self, filename, extension):
""" Parse single content file """
if not extension in self.__extensionParser:
raise RuntimeError("No content parser for extension %s registered" % extension)
return self.__extensionParser[extension].parse(filename)
|
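ContentParser dispatches on file extension, so another format only needs an entry in __extensionParser whose value exposes parse(filename) and returns the page dict that generateFields expects. A hypothetical sketch (Konstrukteur has no MarkdownParser; the name and fields below are invented for illustration):

# Hypothetical markdown parser module, mirroring the interface used by
# ContentParser.__parseContentFile (an object with parse(filename)).
def parse(filename):
    """Return a page dict with at least the fields generateFields touches."""
    with open(filename, encoding="utf-8") as handle:
        body = handle.read()
    return {"title": filename, "content": body, "status": "draft"}

# Registration would then be one extra line in ContentParser.__init__:
#     self.__extensionParser["md"] = konstrukteur.MarkdownParser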
|
8939e873f4ea61169f9384eded5b8c603cfde988
|
crypto/PRESUBMIT.py
|
crypto/PRESUBMIT.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/net.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves(project, change):
# Changes in crypto often need a corresponding OpenSSL edit.
return ['linux_redux']
|
Add crypto pre-submit that will add the openssl builder to the default try-bot list.
|
Add crypto pre-submit that will add the openssl builder to the default try-bot list.
BUG=None
TEST=git try should run a linux_redux try job too.
Review URL: http://codereview.chromium.org/9235031
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@119094 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
ropik/chromium,gavinp/chromium,yitian134/chromium,gavinp/chromium,gavinp/chromium,yitian134/chromium,yitian134/chromium,gavinp/chromium,gavinp/chromium,ropik/chromium,yitian134/chromium,ropik/chromium,ropik/chromium,adobe/chromium,gavinp/chromium,yitian134/chromium,adobe/chromium,ropik/chromium,adobe/chromium,ropik/chromium,adobe/chromium,yitian134/chromium,yitian134/chromium,gavinp/chromium,adobe/chromium,gavinp/chromium,adobe/chromium,yitian134/chromium,yitian134/chromium,ropik/chromium,adobe/chromium,yitian134/chromium,gavinp/chromium,adobe/chromium,ropik/chromium,adobe/chromium,adobe/chromium,gavinp/chromium,ropik/chromium,adobe/chromium
|
Add crypto pre-submit that will add the openssl builder to the default try-bot list.
BUG=None
TEST=git try should run a linux_redux try job too.
Review URL: http://codereview.chromium.org/9235031
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@119094 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/net.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves(project, change):
# Changes in crypto often need a corresponding OpenSSL edit.
return ['linux_redux']
|
<commit_before><commit_msg>Add crypto pre-submit that will add the openssl builder to the default try-bot list.
BUG=None
TEST=git try should run a linux_redux try job too.
Review URL: http://codereview.chromium.org/9235031
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@119094 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/net.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves(project, change):
# Changes in crypto often need a corresponding OpenSSL edit.
return ['linux_redux']
|
Add crypto pre-submit that will add the openssl builder to the default try-bot list.
BUG=None
TEST=git try should run a linux_redux try job too.
Review URL: http://codereview.chromium.org/9235031
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@119094 0039d316-1c4b-4281-b951-d872f2087c98
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/net.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves(project, change):
# Changes in crypto often need a corresponding OpenSSL edit.
return ['linux_redux']
|
<commit_before><commit_msg>Add crypto pre-submit that will add the openssl builder to the default try-bot list.
BUG=None
TEST=git try should run a linux_redux try job too.
Review URL: http://codereview.chromium.org/9235031
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@119094 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/net.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves(project, change):
# Changes in crypto often need a corresponding OpenSSL edit.
return ['linux_redux']
|
|
50d05aabc2eb1d5bcb20d457dd05d2882b983afa
|
install_and_run.py
|
install_and_run.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install and run the TensorBoard plugin for performance analysis.
Usage: python3 install_and_run.py --envdir ENVDIR --logdir LOGDIR
"""
# Lint as: python3
import argparse
import os
import subprocess
def run(*args):
"""Runs a shell command."""
subprocess.run(' '.join(args), shell=True, check=True)
class VirtualEnv(object):
"""Creates and runs programs in a virtual environment."""
def __init__(self, envdir):
self.envdir = envdir
run('virtualenv', '--system-site-packages', '-p', 'python3', self.envdir)
def run(self, program, *args):
run(os.path.join(self.envdir, 'bin', program), *args)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--envdir', help='Virtual environment', required=True)
parser.add_argument('--logdir', help='TensorBoard logdir', required=True)
args = parser.parse_args()
venv = VirtualEnv(args.envdir)
venv.run('pip3', 'uninstall', '-q', '-y', 'tensorboard')
venv.run('pip3', 'uninstall', '-q', '-y', 'tensorflow')
venv.run('pip3', 'install', '-q', '-U', 'tf-nightly')
venv.run('pip3', 'install', '-q', '-U', 'tb-nightly')
venv.run('pip3', 'install', '-q', '-U', 'tensorboard_plugin_profile')
venv.run('tensorboard', '--logdir=' + args.logdir, '--bind_all')
if __name__ == '__main__':
main()
|
Add installation script for profiler.
|
Add installation script for profiler.
|
Python
|
apache-2.0
|
tensorflow/profiler,tensorflow/profiler,tensorflow/profiler,tensorflow/profiler,tensorflow/profiler
|
Add installation script for profiler.
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install and run the TensorBoard plugin for performance analysis.
Usage: python3 install_and_run.py --envdir ENVDIR --logdir LOGDIR
"""
# Lint as: python3
import argparse
import os
import subprocess
def run(*args):
"""Runs a shell command."""
subprocess.run(' '.join(args), shell=True, check=True)
class VirtualEnv(object):
"""Creates and runs programs in a virtual environment."""
def __init__(self, envdir):
self.envdir = envdir
run('virtualenv', '--system-site-packages', '-p', 'python3', self.envdir)
def run(self, program, *args):
run(os.path.join(self.envdir, 'bin', program), *args)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--envdir', help='Virtual environment', required=True)
parser.add_argument('--logdir', help='TensorBoard logdir', required=True)
args = parser.parse_args()
venv = VirtualEnv(args.envdir)
venv.run('pip3', 'uninstall', '-q', '-y', 'tensorboard')
venv.run('pip3', 'uninstall', '-q', '-y', 'tensorflow')
venv.run('pip3', 'install', '-q', '-U', 'tf-nightly')
venv.run('pip3', 'install', '-q', '-U', 'tb-nightly')
venv.run('pip3', 'install', '-q', '-U', 'tensorboard_plugin_profile')
venv.run('tensorboard', '--logdir=' + args.logdir, '--bind_all')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add installation script for profiler.<commit_after>
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install and run the TensorBoard plugin for performance analysis.
Usage: python3 install_and_run.py --envdir ENVDIR --logdir LOGDIR
"""
# Lint as: python3
import argparse
import os
import subprocess
def run(*args):
"""Runs a shell command."""
subprocess.run(' '.join(args), shell=True, check=True)
class VirtualEnv(object):
"""Creates and runs programs in a virtual environment."""
def __init__(self, envdir):
self.envdir = envdir
run('virtualenv', '--system-site-packages', '-p', 'python3', self.envdir)
def run(self, program, *args):
run(os.path.join(self.envdir, 'bin', program), *args)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--envdir', help='Virtual environment', required=True)
parser.add_argument('--logdir', help='TensorBoard logdir', required=True)
args = parser.parse_args()
venv = VirtualEnv(args.envdir)
venv.run('pip3', 'uninstall', '-q', '-y', 'tensorboard')
venv.run('pip3', 'uninstall', '-q', '-y', 'tensorflow')
venv.run('pip3', 'install', '-q', '-U', 'tf-nightly')
venv.run('pip3', 'install', '-q', '-U', 'tb-nightly')
venv.run('pip3', 'install', '-q', '-U', 'tensorboard_plugin_profile')
venv.run('tensorboard', '--logdir=' + args.logdir, '--bind_all')
if __name__ == '__main__':
main()
|
Add installation script for profiler.
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install and run the TensorBoard plugin for performance analysis.
Usage: python3 install_and_run.py --envdir ENVDIR --logdir LOGDIR
"""
# Lint as: python3
import argparse
import os
import subprocess
def run(*args):
"""Runs a shell command."""
subprocess.run(' '.join(args), shell=True, check=True)
class VirtualEnv(object):
"""Creates and runs programs in a virtual environment."""
def __init__(self, envdir):
self.envdir = envdir
run('virtualenv', '--system-site-packages', '-p', 'python3', self.envdir)
def run(self, program, *args):
run(os.path.join(self.envdir, 'bin', program), *args)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--envdir', help='Virtual environment', required=True)
parser.add_argument('--logdir', help='TensorBoard logdir', required=True)
args = parser.parse_args()
venv = VirtualEnv(args.envdir)
venv.run('pip3', 'uninstall', '-q', '-y', 'tensorboard')
venv.run('pip3', 'uninstall', '-q', '-y', 'tensorflow')
venv.run('pip3', 'install', '-q', '-U', 'tf-nightly')
venv.run('pip3', 'install', '-q', '-U', 'tb-nightly')
venv.run('pip3', 'install', '-q', '-U', 'tensorboard_plugin_profile')
venv.run('tensorboard', '--logdir=' + args.logdir, '--bind_all')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add installation script for profiler.<commit_after># Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install and run the TensorBoard plugin for performance analysis.
Usage: python3 install_and_run.py --envdir ENVDIR --logdir LOGDIR
"""
# Lint as: python3
import argparse
import os
import subprocess
def run(*args):
"""Runs a shell command."""
subprocess.run(' '.join(args), shell=True, check=True)
class VirtualEnv(object):
"""Creates and runs programs in a virtual environment."""
def __init__(self, envdir):
self.envdir = envdir
run('virtualenv', '--system-site-packages', '-p', 'python3', self.envdir)
def run(self, program, *args):
run(os.path.join(self.envdir, 'bin', program), *args)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--envdir', help='Virtual environment', required=True)
parser.add_argument('--logdir', help='TensorBoard logdir', required=True)
args = parser.parse_args()
venv = VirtualEnv(args.envdir)
venv.run('pip3', 'uninstall', '-q', '-y', 'tensorboard')
venv.run('pip3', 'uninstall', '-q', '-y', 'tensorflow')
venv.run('pip3', 'install', '-q', '-U', 'tf-nightly')
venv.run('pip3', 'install', '-q', '-U', 'tb-nightly')
venv.run('pip3', 'install', '-q', '-U', 'tensorboard_plugin_profile')
venv.run('tensorboard', '--logdir=' + args.logdir, '--bind_all')
if __name__ == '__main__':
main()
|
|
734967196c8f0577b218802c16d9eab31c9e9054
|
problem_36.py
|
problem_36.py
|
from time import time
def is_palindrome(s):
for idx in range(len(s)/2):
if s[idx] != s[-1*idx - 1]:
return False
return True
def main():
palindrom_nums = [num for num in range(int(1e6)) if is_palindrome(str(num)) and is_palindrome(str(bin(num))[2:])]
print 'Palindroms:', palindrom_nums
print 'Palindrom sum:', sum(palindrom_nums)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 36, palindrome binaries
|
Add problem 36, palindrome binaries
|
Python
|
mit
|
dimkarakostas/project-euler
|
Add problem 36, palindrome binaries
|
from time import time
def is_palindrome(s):
for idx in range(len(s)/2):
if s[idx] != s[-1*idx - 1]:
return False
return True
def main():
palindrom_nums = [num for num in range(int(1e6)) if is_palindrome(str(num)) and is_palindrome(str(bin(num))[2:])]
print 'Palindroms:', palindrom_nums
print 'Palindrom sum:', sum(palindrom_nums)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 36, palindrome binaries<commit_after>
|
from time import time
def is_palindrome(s):
for idx in range(len(s)/2):
if s[idx] != s[-1*idx - 1]:
return False
return True
def main():
palindrom_nums = [num for num in range(int(1e6)) if is_palindrome(str(num)) and is_palindrome(str(bin(num))[2:])]
print 'Palindroms:', palindrom_nums
print 'Palindrom sum:', sum(palindrom_nums)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 36, palindrome binaries
from time import time
def is_palindrome(s):
for idx in range(len(s)/2):
if s[idx] != s[-1*idx - 1]:
return False
return True
def main():
palindrom_nums = [num for num in range(int(1e6)) if is_palindrome(str(num)) and is_palindrome(str(bin(num))[2:])]
print 'Palindroms:', palindrom_nums
print 'Palindrom sum:', sum(palindrom_nums)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 36, palindrome binaries<commit_after>from time import time
def is_palindrome(s):
for idx in range(len(s)/2):
if s[idx] != s[-1*idx - 1]:
return False
return True
def main():
palindrom_nums = [num for num in range(int(1e6)) if is_palindrome(str(num)) and is_palindrome(str(bin(num))[2:])]
print 'Palindroms:', palindrom_nums
print 'Palindrom sum:', sum(palindrom_nums)
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
|
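The script above is Python 2 (print statements, and the integer division in len(s)/2). The same double-base palindrome check as a Python 3 sketch, kept here purely for comparison:

def is_palindrome(s):
    return s == s[::-1]

total = sum(n for n in range(10**6)
            if is_palindrome(str(n)) and is_palindrome(format(n, 'b')))
print('Palindrome sum:', total)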
8fddde260af6ea1e6de8491dd99dca671634327c
|
test/operator/utility_test.py
|
test/operator/utility_test.py
|
# Copyright 2014, 2015 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str, super
# External module imports
import pytest
import numpy as np
# ODL imports
import odl
from odl.operator.utility import matrix_representation
from odl.util.testutils import almost_equal
class MultiplyOp(odl.Operator):
"""Multiply with matrix.
"""
def __init__(self, matrix, domain=None, range=None):
domain = (odl.Rn(matrix.shape[1])
if domain is None else domain)
range = (odl.Rn(matrix.shape[0])
if range is None else range)
self.matrix = matrix
super().__init__(domain, range, linear=True)
def _apply(self, rhs, out):
np.dot(self.matrix, rhs.data, out=out.data)
@property
def adjoint(self):
return MultiplyOp(self.matrix.T, self.range, self.domain)
def test_matrix_representation():
# Verify that the matrix representation function returns the correct matrix
A = np.random.rand(3, 3)
Aop = MultiplyOp(A)
the_matrix = matrix_representation(Aop)
assert almost_equal(np.sum(np.abs(A - the_matrix)), 1e-6)
if __name__ == '__main__':
pytest.main(str(__file__.replace('\\', '/')) + ' -v')
|
Add test for the matrix representation function.
|
TST: Add test for the matrix representation function.
Add a small test in R3 for the matrix representation function, to see
that one gets the same matrix back.
|
Python
|
mpl-2.0
|
odlgroup/odl,odlgroup/odl,kohr-h/odl,kohr-h/odl,aringh/odl,aringh/odl
|
TST: Add test for the matrix representation function.
Add a small test in R3 for the matrix representation function, to see
that one gets the same matrix back.
|
# Copyright 2014, 2015 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str, super
# External module imports
import pytest
import numpy as np
# ODL imports
import odl
from odl.operator.utility import matrix_representation
from odl.util.testutils import almost_equal
class MultiplyOp(odl.Operator):
"""Multiply with matrix.
"""
def __init__(self, matrix, domain=None, range=None):
domain = (odl.Rn(matrix.shape[1])
if domain is None else domain)
range = (odl.Rn(matrix.shape[0])
if range is None else range)
self.matrix = matrix
super().__init__(domain, range, linear=True)
def _apply(self, rhs, out):
np.dot(self.matrix, rhs.data, out=out.data)
@property
def adjoint(self):
return MultiplyOp(self.matrix.T, self.range, self.domain)
def test_matrix_representation():
# Verify that the matrix representation function returns the correct matrix
A = np.random.rand(3, 3)
Aop = MultiplyOp(A)
the_matrix = matrix_representation(Aop)
assert almost_equal(np.sum(np.abs(A - the_matrix)), 1e-6)
if __name__ == '__main__':
pytest.main(str(__file__.replace('\\', '/')) + ' -v')
|
<commit_before><commit_msg>TST: Add test for the matrix representation function.
Add a small test in R3 for the matrix representation function, to see
that one gets the same matrix back.<commit_after>
|
# Copyright 2014, 2015 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str, super
# External module imports
import pytest
import numpy as np
# ODL imports
import odl
from odl.operator.utility import matrix_representation
from odl.util.testutils import almost_equal
class MultiplyOp(odl.Operator):
"""Multiply with matrix.
"""
def __init__(self, matrix, domain=None, range=None):
domain = (odl.Rn(matrix.shape[1])
if domain is None else domain)
range = (odl.Rn(matrix.shape[0])
if range is None else range)
self.matrix = matrix
super().__init__(domain, range, linear=True)
def _apply(self, rhs, out):
np.dot(self.matrix, rhs.data, out=out.data)
@property
def adjoint(self):
return MultiplyOp(self.matrix.T, self.range, self.domain)
def test_matrix_representation():
# Verify that the matrix representation function returns the correct matrix
A = np.random.rand(3, 3)
Aop = MultiplyOp(A)
the_matrix = matrix_representation(Aop)
assert almost_equal(np.sum(np.abs(A - the_matrix)), 1e-6)
if __name__ == '__main__':
pytest.main(str(__file__.replace('\\', '/')) + ' -v')
|
TST: Add test for the matrix representation function.
Add a small test in R3 for the matrix representation function, to see
that one gets the same matrix back.
# Copyright 2014, 2015 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str, super
# External module imports
import pytest
import numpy as np
# ODL imports
import odl
from odl.operator.utility import matrix_representation
from odl.util.testutils import almost_equal
class MultiplyOp(odl.Operator):
"""Multiply with matrix.
"""
def __init__(self, matrix, domain=None, range=None):
domain = (odl.Rn(matrix.shape[1])
if domain is None else domain)
range = (odl.Rn(matrix.shape[0])
if range is None else range)
self.matrix = matrix
super().__init__(domain, range, linear=True)
def _apply(self, rhs, out):
np.dot(self.matrix, rhs.data, out=out.data)
@property
def adjoint(self):
return MultiplyOp(self.matrix.T, self.range, self.domain)
def test_matrix_representation():
# Verify that the matrix representation function returns the correct matrix
A = np.random.rand(3, 3)
Aop = MultiplyOp(A)
the_matrix = matrix_representation(Aop)
assert almost_equal(np.sum(np.abs(A - the_matrix)), 1e-6)
if __name__ == '__main__':
pytest.main(str(__file__.replace('\\', '/')) + ' -v')
|
<commit_before><commit_msg>TST: Add test for the matrix representation function.
Add a small test in R3 for the matrix representation function, to see
that one gets the same matrix back.<commit_after># Copyright 2014, 2015 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str, super
# External module imports
import pytest
import numpy as np
# ODL imports
import odl
from odl.operator.utility import matrix_representation
from odl.util.testutils import almost_equal
class MultiplyOp(odl.Operator):
"""Multiply with matrix.
"""
def __init__(self, matrix, domain=None, range=None):
domain = (odl.Rn(matrix.shape[1])
if domain is None else domain)
range = (odl.Rn(matrix.shape[0])
if range is None else range)
self.matrix = matrix
super().__init__(domain, range, linear=True)
def _apply(self, rhs, out):
np.dot(self.matrix, rhs.data, out=out.data)
@property
def adjoint(self):
return MultiplyOp(self.matrix.T, self.range, self.domain)
def test_matrix_representation():
# Verify that the matrix representation function returns the correct matrix
A = np.random.rand(3, 3)
Aop = MultiplyOp(A)
the_matrix = matrix_representation(Aop)
assert almost_equal(np.sum(np.abs(A - the_matrix)), 1e-6)
if __name__ == '__main__':
pytest.main(str(__file__.replace('\\', '/')) + ' -v')
|
|
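The test above relies on odl.operator.utility.matrix_representation; conceptually such a function recovers the matrix of a linear operator by applying it to each standard basis vector and stacking the results as columns. A plain NumPy sketch of that idea (not ODL's actual implementation):

import numpy as np

def matrix_of(apply_op, n):
    """Probe a linear map on R^n with basis vectors; column j is apply_op(e_j)."""
    columns = []
    for j in range(n):
        e_j = np.zeros(n)
        e_j[j] = 1.0
        columns.append(apply_op(e_j))
    return np.stack(columns, axis=1)

A = np.random.rand(3, 3)
assert np.allclose(matrix_of(lambda x: A @ x, 3), A)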
8b92e55fa202723f7859cd1ea22e835e5c693807
|
Instanssi/kompomaatti/misc/awesometime.py
|
Instanssi/kompomaatti/misc/awesometime.py
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
def todayhelper():
today = datetime.today()
return datetime(day=today.day, year=today.year, month=today.month)
def format_single_helper(t):
now = datetime.now()
today = todayhelper()
tomorrow = today + timedelta(days=1)
the_day_after_tomorrow = today + timedelta(days=2) # Must honor the movie!
if t < now:
return "päättynyt"
elif t >= now and t < tomorrow:
return "tänään klo. " + t.strftime("%H:%M")
elif t >= tomorrow and t < the_day_after_tomorrow:
return "huomenna klo. " + t.strftime("%H:%M")
elif t >= the_day_after_tomorrow and t < today+timedelta(days=3):
return "ylihuomenna klo. " + t.strftime("%H:%M")
else:
return t.strftime("%d.%m.%Y klo. %H:%M")
def format_single(t):
return format_single_helper(t).capitalize()
def format_between(t1, t2):
now = datetime.now()
today = todayhelper()
tomorrow = today + timedelta(days=1)
the_day_after_tomorrow = today + timedelta(days=2) # Must honor the movie!
if t1 < now and t2 > now:
left = t2-now
l_hours = int(left.total_seconds() / timedelta(hours=1).total_seconds())
l_minutes = int((left.total_seconds() - timedelta(hours=l_hours).total_seconds()) / 60)
if(l_hours == 0):
return "Menossa, aikaa jäljellä " + str(l_minutes) + " minuuttia"
else:
return "Menossa, aikaa jäljellä " + str(l_hours) + " tuntia ja " + str(l_minutes) + " minuuttia"
elif t1 > now and t1 < today+timedelta(days=3):
return "Alkaa " + format_single_helper(t1) + " ja päättyy " + format_single_helper(t2)
else:
return "Alkaa " + t1.strftime("%d.%m.%Y %H:%M") + " ja päättyy " + t2.strftime("%d.%m.%Y %H:%M") + "."
|
Add some time handling functions
|
Add some time handling functions
|
Python
|
mit
|
Instanssi/Instanssi.org,Instanssi/Instanssi.org,Instanssi/Instanssi.org,Instanssi/Instanssi.org
|
Add some time handling functions
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
def todayhelper():
today = datetime.today()
return datetime(day=today.day, year=today.year, month=today.month)
def format_single_helper(t):
now = datetime.now()
today = todayhelper()
tomorrow = today + timedelta(days=1)
the_day_after_tomorrow = today + timedelta(days=2) # Must honor the movie!
if t < now:
return "päättynyt"
elif t >= now and t < tomorrow:
return "tänään klo. " + t.strftime("%H:%M")
elif t >= tomorrow and t < the_day_after_tomorrow:
return "huomenna klo. " + t.strftime("%H:%M")
elif t >= the_day_after_tomorrow and t < today+timedelta(days=3):
return "ylihuomenna klo. " + t.strftime("%H:%M")
else:
return t.strftime("%d.%m.%Y klo. %H:%M")
def format_single(t):
return format_single_helper(t).capitalize()
def format_between(t1, t2):
now = datetime.now()
today = todayhelper()
tomorrow = today + timedelta(days=1)
the_day_after_tomorrow = today + timedelta(days=2) # Must honor the movie!
if t1 < now and t2 > now:
left = t2-now
l_hours = int(left.total_seconds() / timedelta(hours=1).total_seconds())
l_minutes = int((left.total_seconds() - timedelta(hours=l_hours).total_seconds()) / 60)
if(l_hours == 0):
return "Menossa, aikaa jäljellä " + str(l_minutes) + " minuuttia"
else:
return "Menossa, aikaa jäljellä " + str(l_hours) + " tuntia ja " + str(l_minutes) + " minuuttia"
elif t1 > now and t1 < today+timedelta(days=3):
return "Alkaa " + format_single_helper(t1) + " ja päättyy " + format_single_helper(t2)
else:
return "Alkaa " + t1.strftime("%d.%m.%Y %H:%M") + " ja päättyy " + t2.strftime("%d.%m.%Y %H:%M") + "."
|
<commit_before><commit_msg>Add some time handling functions<commit_after>
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
def todayhelper():
today = datetime.today()
return datetime(day=today.day, year=today.year, month=today.month)
def format_single_helper(t):
now = datetime.now()
today = todayhelper()
tomorrow = today + timedelta(days=1)
the_day_after_tomorrow = today + timedelta(days=2) # Must honor the movie!
if t < now:
return "päättynyt"
elif t >= now and t < tomorrow:
return "tänään klo. " + t.strftime("%H:%M")
elif t >= tomorrow and t < the_day_after_tomorrow:
return "huomenna klo. " + t.strftime("%H:%M")
elif t >= the_day_after_tomorrow and t < today+timedelta(days=3):
return "ylihuomenna klo. " + t.strftime("%H:%M")
else:
return t.strftime("%d.%m.%Y klo. %H:%M")
def format_single(t):
return format_single_helper(t).capitalize()
def format_between(t1, t2):
now = datetime.now()
today = todayhelper()
tomorrow = today + timedelta(days=1)
the_day_after_tomorrow = today + timedelta(days=2) # Must honor the movie!
if t1 < now and t2 > now:
left = t2-now
l_hours = int(left.total_seconds() / timedelta(hours=1).total_seconds())
l_minutes = int((left.total_seconds() - timedelta(hours=l_hours).total_seconds()) / 60)
if(l_hours == 0):
return "Menossa, aikaa jäljellä " + str(l_minutes) + " minuuttia"
else:
return "Menossa, aikaa jäljellä " + str(l_hours) + " tuntia ja " + str(l_minutes) + " minuuttia"
elif t1 > now and t1 < today+timedelta(days=3):
return "Alkaa " + format_single_helper(t1) + " ja päättyy " + format_single_helper(t2)
else:
return "Alkaa " + t1.strftime("%d.%m.%Y %H:%M") + " ja päättyy " + t2.strftime("%d.%m.%Y %H:%M") + "."
|
Add some time handling functions# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
def todayhelper():
today = datetime.today()
return datetime(day=today.day, year=today.year, month=today.month)
def format_single_helper(t):
now = datetime.now()
today = todayhelper()
tomorrow = today + timedelta(days=1)
the_day_after_tomorrow = today + timedelta(days=2) # Must honor the movie!
if t < now:
return "päättynyt"
elif t >= now and t < tomorrow:
return "tänään klo. " + t.strftime("%H:%M")
elif t >= tomorrow and t < the_day_after_tomorrow:
return "huomenna klo. " + t.strftime("%H:%M")
elif t >= the_day_after_tomorrow and t < today+timedelta(days=3):
return "ylihuomenna klo. " + t.strftime("%H:%M")
else:
return t.strftime("%d.%m.%Y klo. %H:%M")
def format_single(t):
return format_single_helper(t).capitalize()
def format_between(t1, t2):
now = datetime.now()
today = todayhelper()
tomorrow = today + timedelta(days=1)
the_day_after_tomorrow = today + timedelta(days=2) # Must honor the movie!
if t1 < now and t2 > now:
left = t2-now
l_hours = int(left.total_seconds() / timedelta(hours=1).total_seconds())
l_minutes = int((left.total_seconds() - timedelta(hours=l_hours).total_seconds()) / 60)
if(l_hours == 0):
return "Menossa, aikaa jäljellä " + str(l_minutes) + " minuuttia"
else:
return "Menossa, aikaa jäljellä " + str(l_hours) + " tuntia ja " + str(l_minutes) + " minuuttia"
elif t1 > now and t1 < today+timedelta(days=3):
return "Alkaa " + format_single_helper(t1) + " ja päättyy " + format_single_helper(t2)
else:
return "Alkaa " + t1.strftime("%d.%m.%Y %H:%M") + " ja päättyy " + t2.strftime("%d.%m.%Y %H:%M") + "."
|
<commit_before><commit_msg>Add some time handling functions<commit_after># -*- coding: utf-8 -*-
from datetime import datetime, timedelta
def todayhelper():
today = datetime.today()
return datetime(day=today.day, year=today.year, month=today.month)
def format_single_helper(t):
now = datetime.now()
today = todayhelper()
tomorrow = today + timedelta(days=1)
the_day_after_tomorrow = today + timedelta(days=2) # Must honor the movie!
if t < now:
return "päättynyt"
elif t >= now and t < tomorrow:
return "tänään klo. " + t.strftime("%H:%M")
elif t >= tomorrow and t < the_day_after_tomorrow:
return "huomenna klo. " + t.strftime("%H:%M")
elif t >= the_day_after_tomorrow and t < today+timedelta(days=3):
return "ylihuomenna klo. " + t.strftime("%H:%M")
else:
return t.strftime("%d.%m.%Y klo. %H:%M")
def format_single(t):
return format_single_helper(t).capitalize()
def format_between(t1, t2):
now = datetime.now()
today = todayhelper()
tomorrow = today + timedelta(days=1)
the_day_after_tomorrow = today + timedelta(days=2) # Must honor the movie!
if t1 < now and t2 > now:
left = t2-now
l_hours = int(left.total_seconds() / timedelta(hours=1).total_seconds())
l_minutes = int((left.total_seconds() - timedelta(hours=l_hours).total_seconds()) / 60)
if(l_hours == 0):
return "Menossa, aikaa jäljellä " + str(l_minutes) + " minuuttia"
else:
return "Menossa, aikaa jäljellä " + str(l_hours) + " tuntia ja " + str(l_minutes) + " minuuttia"
elif t1 > now and t1 < today+timedelta(days=3):
return "Alkaa " + format_single_helper(t1) + " ja päättyy " + format_single_helper(t2)
else:
return "Alkaa " + t1.strftime("%d.%m.%Y %H:%M") + " ja päättyy " + t2.strftime("%d.%m.%Y %H:%M") + "."
|
|
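A minimal usage sketch for the helpers above, assuming the module is importable as Instanssi.kompomaatti.misc.awesometime (the path committed in this record); the example times and the Finnish output strings are illustrative:

from datetime import datetime, timedelta
from Instanssi.kompomaatti.misc.awesometime import format_single, format_between

start = datetime.now() + timedelta(hours=1)
end = start + timedelta(hours=3)
print(format_single(start))          # e.g. "Tänään klo. 18:00"
print(format_between(start, end))    # e.g. "Alkaa tänään klo. 18:00 ja päättyy tänään klo. 21:00"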
bca4a0a0dda95306fe126191166e733c7ccea3ee
|
nodeconductor/backup/perms.py
|
nodeconductor/backup/perms.py
|
from nodeconductor.core.permissions import StaffPermissionLogic
PERMISSION_LOGICS = (
('backup.BackupSchedule', StaffPermissionLogic(any_permission=True)),
('backup.Backup', StaffPermissionLogic(any_permission=True)),
)
|
Add staff permissions for backup models
|
Add staff permissions for backup models
|
Python
|
mit
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
Add staff permissions for backup models
|
from nodeconductor.core.permissions import StaffPermissionLogic
PERMISSION_LOGICS = (
('backup.BackupSchedule', StaffPermissionLogic(any_permission=True)),
('backup.Backup', StaffPermissionLogic(any_permission=True)),
)
|
<commit_before><commit_msg>Add staff permissions for backup models<commit_after>
|
from nodeconductor.core.permissions import StaffPermissionLogic
PERMISSION_LOGICS = (
('backup.BackupSchedule', StaffPermissionLogic(any_permission=True)),
('backup.Backup', StaffPermissionLogic(any_permission=True)),
)
|
Add staff permissions for backup modelsfrom nodeconductor.core.permissions import StaffPermissionLogic
PERMISSION_LOGICS = (
('backup.BackupSchedule', StaffPermissionLogic(any_permission=True)),
('backup.Backup', StaffPermissionLogic(any_permission=True)),
)
|
<commit_before><commit_msg>Add staff permissions for backup models<commit_after>from nodeconductor.core.permissions import StaffPermissionLogic
PERMISSION_LOGICS = (
('backup.BackupSchedule', StaffPermissionLogic(any_permission=True)),
('backup.Backup', StaffPermissionLogic(any_permission=True)),
)
|
|
2fda10a83aa5a4d3080a0ce8751e28a18fc9a3e0
|
examples/two_point.py
|
examples/two_point.py
|
"""
Demonstrates plotting multiple linear features with a single ``ax.pole`` call.
The real purpose of this example is to serve as an implicit regression test for
some oddities in the way axes grid lines are handled in matplotlib and
mplstereonet. A 2-vertex line can sometimes be confused for an axes grid line,
and they need different handling on a stereonet.
"""
import matplotlib.pyplot as plt
import mplstereonet
fig, ax = mplstereonet.subplots(figsize=(7,7))
strike = [200, 250]
dip = [50, 60]
ax.pole(strike, dip, 'go', markersize=10)
ax.grid()
plt.show()
|
Add two-point example to serve as a regression test for gridline/plot distinguishing
|
Add two-point example to serve as a regression test for gridline/plot distinguishing
|
Python
|
mit
|
joferkington/mplstereonet
|
Add two-point example to serve as a regression test for gridline/plot distinguishing
|
"""
Demonstrates plotting multiple linear features with a single ``ax.pole`` call.
The real purpose of this example is to serve as an implicit regression test for
some oddities in the way axes grid lines are handled in matplotlib and
mplstereonet. A 2-vertex line can sometimes be confused for an axes grid line,
and they need different handling on a stereonet.
"""
import matplotlib.pyplot as plt
import mplstereonet
fig, ax = mplstereonet.subplots(figsize=(7,7))
strike = [200, 250]
dip = [50, 60]
ax.pole(strike, dip, 'go', markersize=10)
ax.grid()
plt.show()
|
<commit_before><commit_msg>Add two-point example to serve as a regression test for gridline/plot distinguishing<commit_after>
|
"""
Demonstrates plotting multiple linear features with a single ``ax.pole`` call.
The real purpose of this example is to serve as an implicit regression test for
some oddities in the way axes grid lines are handled in matplotlib and
mplstereonet. A 2-vertex line can sometimes be confused for an axes grid line,
and they need different handling on a stereonet.
"""
import matplotlib.pyplot as plt
import mplstereonet
fig, ax = mplstereonet.subplots(figsize=(7,7))
strike = [200, 250]
dip = [50, 60]
ax.pole(strike, dip, 'go', markersize=10)
ax.grid()
plt.show()
|
Add two-point example to serve as a regression test for gridline/plot distinguishing"""
Demonstrates plotting multiple linear features with a single ``ax.pole`` call.
The real purpose of this example is to serve as an implicit regression test for
some oddities in the way axes grid lines are handled in matplotlib and
mplstereonet. A 2-vertex line can sometimes be confused for an axes grid line,
and they need different handling on a stereonet.
"""
import matplotlib.pyplot as plt
import mplstereonet
fig, ax = mplstereonet.subplots(figsize=(7,7))
strike = [200, 250]
dip = [50, 60]
ax.pole(strike, dip, 'go', markersize=10)
ax.grid()
plt.show()
|
<commit_before><commit_msg>Add two-point example to serve as a regression test for gridline/plot distinguishing<commit_after>"""
Demonstrates plotting multiple linear features with a single ``ax.pole`` call.
The real purpose of this example is to serve as an implicit regression test for
some oddities in the way axes grid lines are handled in matplotlib and
mplstereonet. A 2-vertex line can sometimes be confused for an axes grid line,
and they need different handling on a stereonet.
"""
import matplotlib.pyplot as plt
import mplstereonet
fig, ax = mplstereonet.subplots(figsize=(7,7))
strike = [200, 250]
dip = [50, 60]
ax.pole(strike, dip, 'go', markersize=10)
ax.grid()
plt.show()
|
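Since the example above exists mainly as a regression test, a headless variant that renders to a file instead of opening a window can be handy in CI; this sketch reuses the same plotting calls, and only the backend choice and file name are assumptions:

import matplotlib
matplotlib.use('Agg')                  # non-interactive backend, no display needed
import mplstereonet

fig, ax = mplstereonet.subplots(figsize=(7, 7))
ax.pole([200, 250], [50, 60], 'go', markersize=10)
ax.grid()
fig.savefig('two_point.png', dpi=150)  # inspect or diff the rendered image offline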
|
ee85acb7f9f3af91db3bfb4bf766636883f07685
|
opal/tests/test_core_views.py
|
opal/tests/test_core_views.py
|
"""
Unittests for opal.core.views
"""
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
|
Add an extra test for the OpalSerializer
|
Add an extra test for the OpalSerializer
|
Python
|
agpl-3.0
|
khchine5/opal,khchine5/opal,khchine5/opal
|
Add an extra test for the OpalSerializer
|
"""
Unittests for opal.core.views
"""
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
|
<commit_before><commit_msg>Add an extra test for the OpalSerializer<commit_after>
|
"""
Unittests for opal.core.views
"""
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
|
Add an extra test for the OpalSerializer"""
Unittests for opal.core.views
"""
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
|
<commit_before><commit_msg>Add an extra test for the OpalSerializer<commit_after>"""
Unittests for opal.core.views
"""
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
|
|
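The test above expects OpalSerializer.default() to defer to its parent and raise TypeError for unserialisable values, which is the standard json.JSONEncoder contract; a generic sketch of that pattern (the subclass below is illustrative, not Opal's actual implementation):

import datetime
import json

class DateAwareEncoder(json.JSONEncoder):                # illustrative subclass
    def default(self, o):
        if isinstance(o, datetime.date):
            return o.isoformat()
        return super(DateAwareEncoder, self).default(o)  # raises TypeError for anything else

print(json.dumps({'when': datetime.date(2016, 1, 1)}, cls=DateAwareEncoder))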
a0a2017e05af986cd0a7207c429e7dc5e8b3fcd2
|
tests/test_solver_variable.py
|
tests/test_solver_variable.py
|
from gaphas.solver import Variable
def test_equality():
v = Variable(3)
w = Variable(3)
o = Variable(2)
assert v == 3
assert 3 == v
assert v == w
assert not v == o
assert v != 2
assert 2 != v
assert not 3 != v
assert v != o
def test_add_to_variable():
v = Variable(3)
assert v + 1 == 4
assert v - 1 == 2
assert 1 + v == 4
assert 4 - v == 1
def test_add_to_variable_with_variable():
v = Variable(3)
o = Variable(1)
assert v + o == 4
assert v - o == 2
def test_mutiplication():
v = Variable(3)
assert v * 2 == 6
assert v / 2 == 1.5
assert v // 2 == 1
assert 2 * v == 6
assert 4.5 / v == 1.5
assert 4 // v == 1
def test_mutiplication_with_variable():
v = Variable(3)
o = Variable(2)
assert v * o == 6
assert v / o == 1.5
assert v // o == 1
def test_comparison():
v = Variable(3)
assert v > 2
assert v < 4
assert v >= 2
assert v >= 3
assert v <= 4
assert v <= 3
assert not v > 3
assert not v < 3
assert not v <= 2
assert not v >= 4
def test_inverse_comparison():
v = Variable(3)
assert 4 > v
assert 2 < v
assert 4 >= v
assert 3 >= v
assert 2 <= v
assert 3 <= v
assert not 3 > v
assert not 3 < v
assert not 4 <= v
assert not 2 >= v
def test_power():
v = Variable(3)
o = Variable(2)
assert v ** 2 == 9
assert 2 ** v == 8
assert v ** o == 9
def test_modulo():
v = Variable(3)
o = Variable(2)
assert v % 2 == 1
assert 4 % v == 1
assert v % o == 1
assert divmod(v, 2) == (1, 1)
assert divmod(4, v) == (1, 1)
assert divmod(v, o) == (1, 1)
|
Add missing tests for Variable
|
Add missing tests for Variable
|
Python
|
lgpl-2.1
|
amolenaar/gaphas
|
Add missing tests for Variable
|
from gaphas.solver import Variable
def test_equality():
v = Variable(3)
w = Variable(3)
o = Variable(2)
assert v == 3
assert 3 == v
assert v == w
assert not v == o
assert v != 2
assert 2 != v
assert not 3 != v
assert v != o
def test_add_to_variable():
v = Variable(3)
assert v + 1 == 4
assert v - 1 == 2
assert 1 + v == 4
assert 4 - v == 1
def test_add_to_variable_with_variable():
v = Variable(3)
o = Variable(1)
assert v + o == 4
assert v - o == 2
def test_mutiplication():
v = Variable(3)
assert v * 2 == 6
assert v / 2 == 1.5
assert v // 2 == 1
assert 2 * v == 6
assert 4.5 / v == 1.5
assert 4 // v == 1
def test_mutiplication_with_variable():
v = Variable(3)
o = Variable(2)
assert v * o == 6
assert v / o == 1.5
assert v // o == 1
def test_comparison():
v = Variable(3)
assert v > 2
assert v < 4
assert v >= 2
assert v >= 3
assert v <= 4
assert v <= 3
assert not v > 3
assert not v < 3
assert not v <= 2
assert not v >= 4
def test_inverse_comparison():
v = Variable(3)
assert 4 > v
assert 2 < v
assert 4 >= v
assert 3 >= v
assert 2 <= v
assert 3 <= v
assert not 3 > v
assert not 3 < v
assert not 4 <= v
assert not 2 >= v
def test_power():
v = Variable(3)
o = Variable(2)
assert v ** 2 == 9
assert 2 ** v == 8
assert v ** o == 9
def test_modulo():
v = Variable(3)
o = Variable(2)
assert v % 2 == 1
assert 4 % v == 1
assert v % o == 1
assert divmod(v, 2) == (1, 1)
assert divmod(4, v) == (1, 1)
assert divmod(v, o) == (1, 1)
|
<commit_before><commit_msg>Add missing tests for Variable<commit_after>
|
from gaphas.solver import Variable
def test_equality():
v = Variable(3)
w = Variable(3)
o = Variable(2)
assert v == 3
assert 3 == v
assert v == w
assert not v == o
assert v != 2
assert 2 != v
assert not 3 != v
assert v != o
def test_add_to_variable():
v = Variable(3)
assert v + 1 == 4
assert v - 1 == 2
assert 1 + v == 4
assert 4 - v == 1
def test_add_to_variable_with_variable():
v = Variable(3)
o = Variable(1)
assert v + o == 4
assert v - o == 2
def test_mutiplication():
v = Variable(3)
assert v * 2 == 6
assert v / 2 == 1.5
assert v // 2 == 1
assert 2 * v == 6
assert 4.5 / v == 1.5
assert 4 // v == 1
def test_mutiplication_with_variable():
v = Variable(3)
o = Variable(2)
assert v * o == 6
assert v / o == 1.5
assert v // o == 1
def test_comparison():
v = Variable(3)
assert v > 2
assert v < 4
assert v >= 2
assert v >= 3
assert v <= 4
assert v <= 3
assert not v > 3
assert not v < 3
assert not v <= 2
assert not v >= 4
def test_inverse_comparison():
v = Variable(3)
assert 4 > v
assert 2 < v
assert 4 >= v
assert 3 >= v
assert 2 <= v
assert 3 <= v
assert not 3 > v
assert not 3 < v
assert not 4 <= v
assert not 2 >= v
def test_power():
v = Variable(3)
o = Variable(2)
assert v ** 2 == 9
assert 2 ** v == 8
assert v ** o == 9
def test_modulo():
v = Variable(3)
o = Variable(2)
assert v % 2 == 1
assert 4 % v == 1
assert v % o == 1
assert divmod(v, 2) == (1, 1)
assert divmod(4, v) == (1, 1)
assert divmod(v, o) == (1, 1)
|
Add missing tests for Variablefrom gaphas.solver import Variable
def test_equality():
v = Variable(3)
w = Variable(3)
o = Variable(2)
assert v == 3
assert 3 == v
assert v == w
assert not v == o
assert v != 2
assert 2 != v
assert not 3 != v
assert v != o
def test_add_to_variable():
v = Variable(3)
assert v + 1 == 4
assert v - 1 == 2
assert 1 + v == 4
assert 4 - v == 1
def test_add_to_variable_with_variable():
v = Variable(3)
o = Variable(1)
assert v + o == 4
assert v - o == 2
def test_mutiplication():
v = Variable(3)
assert v * 2 == 6
assert v / 2 == 1.5
assert v // 2 == 1
assert 2 * v == 6
assert 4.5 / v == 1.5
assert 4 // v == 1
def test_mutiplication_with_variable():
v = Variable(3)
o = Variable(2)
assert v * o == 6
assert v / o == 1.5
assert v // o == 1
def test_comparison():
v = Variable(3)
assert v > 2
assert v < 4
assert v >= 2
assert v >= 3
assert v <= 4
assert v <= 3
assert not v > 3
assert not v < 3
assert not v <= 2
assert not v >= 4
def test_inverse_comparison():
v = Variable(3)
assert 4 > v
assert 2 < v
assert 4 >= v
assert 3 >= v
assert 2 <= v
assert 3 <= v
assert not 3 > v
assert not 3 < v
assert not 4 <= v
assert not 2 >= v
def test_power():
v = Variable(3)
o = Variable(2)
assert v ** 2 == 9
assert 2 ** v == 8
assert v ** o == 9
def test_modulo():
v = Variable(3)
o = Variable(2)
assert v % 2 == 1
assert 4 % v == 1
assert v % o == 1
assert divmod(v, 2) == (1, 1)
assert divmod(4, v) == (1, 1)
assert divmod(v, o) == (1, 1)
|
<commit_before><commit_msg>Add missing tests for Variable<commit_after>from gaphas.solver import Variable
def test_equality():
v = Variable(3)
w = Variable(3)
o = Variable(2)
assert v == 3
assert 3 == v
assert v == w
assert not v == o
assert v != 2
assert 2 != v
assert not 3 != v
assert v != o
def test_add_to_variable():
v = Variable(3)
assert v + 1 == 4
assert v - 1 == 2
assert 1 + v == 4
assert 4 - v == 1
def test_add_to_variable_with_variable():
v = Variable(3)
o = Variable(1)
assert v + o == 4
assert v - o == 2
def test_mutiplication():
v = Variable(3)
assert v * 2 == 6
assert v / 2 == 1.5
assert v // 2 == 1
assert 2 * v == 6
assert 4.5 / v == 1.5
assert 4 // v == 1
def test_mutiplication_with_variable():
v = Variable(3)
o = Variable(2)
assert v * o == 6
assert v / o == 1.5
assert v // o == 1
def test_comparison():
v = Variable(3)
assert v > 2
assert v < 4
assert v >= 2
assert v >= 3
assert v <= 4
assert v <= 3
assert not v > 3
assert not v < 3
assert not v <= 2
assert not v >= 4
def test_inverse_comparison():
v = Variable(3)
assert 4 > v
assert 2 < v
assert 4 >= v
assert 3 >= v
assert 2 <= v
assert 3 <= v
assert not 3 > v
assert not 3 < v
assert not 4 <= v
assert not 2 >= v
def test_power():
v = Variable(3)
o = Variable(2)
assert v ** 2 == 9
assert 2 ** v == 8
assert v ** o == 9
def test_modulo():
v = Variable(3)
o = Variable(2)
assert v % 2 == 1
assert 4 % v == 1
assert v % o == 1
assert divmod(v, 2) == (1, 1)
assert divmod(4, v) == (1, 1)
assert divmod(v, o) == (1, 1)
|
|
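A compact interactive-style companion to the tests above, showing that Variable instances take part in arithmetic and comparisons like plain numbers (assumes gaphas is installed; exact reprs depend on Variable's internals):

from gaphas.solver import Variable

v = Variable(3)
o = Variable(2)
print(v + o, v * o, v ** o)    # e.g. 5.0 6.0 9.0 — results compare equal to the plain ints
print(v > o, divmod(v, o))     # True and a (quotient, remainder) pair equal to (1, 1)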
e87982d03edeb7c16d3c183309adfff4be50d168
|
gui/qt.py
|
gui/qt.py
|
from lib.version import AMON_VERSION
from lib.keybase import KeybaseUser
from lib.gmail import GmailUser
from lib.addresses import AddressBook
import lib.gpg as gpg
import sys
import logging
import json
from PyQt4 import QtGui
class Amon(QtGui.QMainWindow):
def __init__(self):
super(Amon, self).__init__()
self.keybase_user = KeybaseUser()
self.gmail = GmailUser()
self.address_book = AddressBook()
|
Add Qt4 file to start on creating a Qt-based GUI
|
Add Qt4 file to start on creating a Qt-based GUI
|
Python
|
unlicense
|
CodingAnarchy/Amon
|
Add Qt4 file to start on creating a Qt-based GUI
|
from lib.version import AMON_VERSION
from lib.keybase import KeybaseUser
from lib.gmail import GmailUser
from lib.addresses import AddressBook
import lib.gpg as gpg
import sys
import logging
import json
from PyQt4 import QtGui
class Amon(QtGui.QMainWindow):
def __init__(self):
super(Amon, self).__init__()
self.keybase_user = KeybaseUser()
self.gmail = GmailUser()
self.address_book = AddressBook()
|
<commit_before><commit_msg>Add Qt4 file to start on creating a Qt-based GUI<commit_after>
|
from lib.version import AMON_VERSION
from lib.keybase import KeybaseUser
from lib.gmail import GmailUser
from lib.addresses import AddressBook
import lib.gpg as gpg
import sys
import logging
import json
from PyQt4 import QtGui
class Amon(QtGui.QMainWindow):
def __init__(self):
super(Amon, self).__init__()
self.keybase_user = KeybaseUser()
self.gmail = GmailUser()
self.address_book = AddressBook()
|
Add Qt4 file to start on creating a Qt-based GUIfrom lib.version import AMON_VERSION
from lib.keybase import KeybaseUser
from lib.gmail import GmailUser
from lib.addresses import AddressBook
import lib.gpg as gpg
import sys
import logging
import json
from PyQt4 import QtGui
class Amon(QtGui.QMainWindow):
def __init__(self):
super(Amon, self).__init__()
self.keybase_user = KeybaseUser()
self.gmail = GmailUser()
self.address_book = AddressBook()
|
<commit_before><commit_msg>Add Qt4 file to start on creating a Qt-based GUI<commit_after>from lib.version import AMON_VERSION
from lib.keybase import KeybaseUser
from lib.gmail import GmailUser
from lib.addresses import AddressBook
import lib.gpg as gpg
import sys
import logging
import json
from PyQt4 import QtGui
class Amon(QtGui.QMainWindow):
def __init__(self):
super(Amon, self).__init__()
self.keybase_user = KeybaseUser()
self.gmail = GmailUser()
self.address_book = AddressBook()
|
|
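The committed class is only a skeleton, so a typical PyQt4 entry point is sketched below for context; the import path mirrors the file location in this record, and the rest is standard boilerplate rather than code from the Amon repository:

import sys
from PyQt4 import QtGui
from gui.qt import Amon        # module path as committed; adjust to the real package layout

def main():
    app = QtGui.QApplication(sys.argv)
    window = Amon()
    window.show()
    sys.exit(app.exec_())

if __name__ == '__main__':
    main()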
8ae3e44b0a43f382c98194b9caa097b62de899ef
|
nlpppln/save_ner_data.py
|
nlpppln/save_ner_data.py
|
#!/usr/bin/env python
import click
import os
import codecs
import json
import pandas as pd
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
def nerstats(input_dir, output_file):
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
frames = []
files = os.listdir(input_dir)
for fi in files:
with codecs.open(os.path.join(input_dir, fi), encoding='utf-8') as f:
saf = json.load(f)
data = {}
data['word'] = [t['word'] for t in saf['tokens'] if 'ne' in t.keys()]
data['ner'] = [t['ne'] for t in saf['tokens'] if 'ne' in t.keys()]
data['w_id'] = [t['id'] for t in saf['tokens'] if 'ne' in t.keys()]
data['text'] = [fi for t in saf['tokens'] if 'ne' in t.keys()]
frames.append(pd.DataFrame(data=data))
df = pd.concat(frames, ignore_index=True)
df.to_csv(output_file)
if __name__ == '__main__':
nerstats()
|
Add script to save ner data to a csv file
|
Add script to save ner data to a csv file
|
Python
|
apache-2.0
|
WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln,WhatWorksWhenForWhom/nlppln
|
Add script to save ner data to a csv file
|
#!/usr/bin/env python
import click
import os
import codecs
import json
import pandas as pd
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
def nerstats(input_dir, output_file):
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
frames = []
files = os.listdir(input_dir)
for fi in files:
with codecs.open(os.path.join(input_dir, fi), encoding='utf-8') as f:
saf = json.load(f)
data = {}
data['word'] = [t['word'] for t in saf['tokens'] if 'ne' in t.keys()]
data['ner'] = [t['ne'] for t in saf['tokens'] if 'ne' in t.keys()]
data['w_id'] = [t['id'] for t in saf['tokens'] if 'ne' in t.keys()]
data['text'] = [fi for t in saf['tokens'] if 'ne' in t.keys()]
frames.append(pd.DataFrame(data=data))
df = pd.concat(frames, ignore_index=True)
df.to_csv(output_file)
if __name__ == '__main__':
nerstats()
|
<commit_before><commit_msg>Add script to save ner data to a csv file<commit_after>
|
#!/usr/bin/env python
import click
import os
import codecs
import json
import pandas as pd
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
def nerstats(input_dir, output_file):
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
frames = []
files = os.listdir(input_dir)
for fi in files:
with codecs.open(os.path.join(input_dir, fi), encoding='utf-8') as f:
saf = json.load(f)
data = {}
data['word'] = [t['word'] for t in saf['tokens'] if 'ne' in t.keys()]
data['ner'] = [t['ne'] for t in saf['tokens'] if 'ne' in t.keys()]
data['w_id'] = [t['id'] for t in saf['tokens'] if 'ne' in t.keys()]
data['text'] = [fi for t in saf['tokens'] if 'ne' in t.keys()]
frames.append(pd.DataFrame(data=data))
df = pd.concat(frames, ignore_index=True)
df.to_csv(output_file)
if __name__ == '__main__':
nerstats()
|
Add script to save ner data to a csv file#!/usr/bin/env python
import click
import os
import codecs
import json
import pandas as pd
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
def nerstats(input_dir, output_file):
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
frames = []
files = os.listdir(input_dir)
for fi in files:
with codecs.open(os.path.join(input_dir, fi), encoding='utf-8') as f:
saf = json.load(f)
data = {}
data['word'] = [t['word'] for t in saf['tokens'] if 'ne' in t.keys()]
data['ner'] = [t['ne'] for t in saf['tokens'] if 'ne' in t.keys()]
data['w_id'] = [t['id'] for t in saf['tokens'] if 'ne' in t.keys()]
data['text'] = [fi for t in saf['tokens'] if 'ne' in t.keys()]
frames.append(pd.DataFrame(data=data))
df = pd.concat(frames, ignore_index=True)
df.to_csv(output_file)
if __name__ == '__main__':
nerstats()
|
<commit_before><commit_msg>Add script to save ner data to a csv file<commit_after>#!/usr/bin/env python
import click
import os
import codecs
import json
import pandas as pd
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
def nerstats(input_dir, output_file):
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
frames = []
files = os.listdir(input_dir)
for fi in files:
with codecs.open(os.path.join(input_dir, fi), encoding='utf-8') as f:
saf = json.load(f)
data = {}
data['word'] = [t['word'] for t in saf['tokens'] if 'ne' in t.keys()]
data['ner'] = [t['ne'] for t in saf['tokens'] if 'ne' in t.keys()]
data['w_id'] = [t['id'] for t in saf['tokens'] if 'ne' in t.keys()]
data['text'] = [fi for t in saf['tokens'] if 'ne' in t.keys()]
frames.append(pd.DataFrame(data=data))
df = pd.concat(frames, ignore_index=True)
df.to_csv(output_file)
if __name__ == '__main__':
nerstats()
|
|
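The script above expects SAF-style JSON documents with a top-level tokens list; a minimal input it can consume is sketched here, limited to the keys the script actually reads (word, ne, id), followed by an illustrative command line:

example_saf = {
    "tokens": [
        {"id": 1, "word": "Amsterdam", "ne": "LOCATION"},
        {"id": 2, "word": "visited"},        # no 'ne' key, so this token is skipped
    ]
}
# Usage (paths are illustrative):
#   python nlpppln/save_ner_data.py input_saf_dir/ output/ner.csv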
59de1a12d44245b69ade0d4703c98bf772681751
|
user_management/models/tests/test_admin_forms.py
|
user_management/models/tests/test_admin_forms.py
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from .. import admin_forms
from . factories import UserFactory
class UserCreationFormTest(TestCase):
def test_clean_email(self):
email = 'test@example.com'
form = admin_forms.UserCreationForm()
form.cleaned_data = {'email': email}
self.assertEqual(form.clean_email(), email)
def test_clean_duplicate_email(self):
user = UserFactory.create()
form = admin_forms.UserCreationForm()
form.cleaned_data = {'email': user.email}
with self.assertRaises(ValidationError):
form.clean_email()
def test_clean(self):
data = {'password1': 'pass123', 'password2': 'pass123'}
form = admin_forms.UserCreationForm()
form.cleaned_data = data
self.assertEqual(form.clean(), data)
def test_clean_mismatched(self):
data = {'password1': 'pass123', 'password2': 'pass321'}
form = admin_forms.UserCreationForm()
form.cleaned_data = data
with self.assertRaises(ValidationError):
form.clean()
class UserChangeFormTest(TestCase):
def test_clean_password(self):
password = 'pass123'
data = {'password': password}
user = UserFactory.build()
form = admin_forms.UserChangeForm(data, instance=user)
self.assertNotEqual(form.clean_password(), password)
|
Add tests for User admin_forms
|
Add tests for User admin_forms
|
Python
|
bsd-2-clause
|
incuna/django-user-management,incuna/django-user-management
|
Add tests for User admin_forms
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from .. import admin_forms
from . factories import UserFactory
class UserCreationFormTest(TestCase):
def test_clean_email(self):
email = 'test@example.com'
form = admin_forms.UserCreationForm()
form.cleaned_data = {'email': email}
self.assertEqual(form.clean_email(), email)
def test_clean_duplicate_email(self):
user = UserFactory.create()
form = admin_forms.UserCreationForm()
form.cleaned_data = {'email': user.email}
with self.assertRaises(ValidationError):
form.clean_email()
def test_clean(self):
data = {'password1': 'pass123', 'password2': 'pass123'}
form = admin_forms.UserCreationForm()
form.cleaned_data = data
self.assertEqual(form.clean(), data)
def test_clean_mismatched(self):
data = {'password1': 'pass123', 'password2': 'pass321'}
form = admin_forms.UserCreationForm()
form.cleaned_data = data
with self.assertRaises(ValidationError):
form.clean()
class UserChangeFormTest(TestCase):
def test_clean_password(self):
password = 'pass123'
data = {'password': password}
user = UserFactory.build()
form = admin_forms.UserChangeForm(data, instance=user)
self.assertNotEqual(form.clean_password(), password)
|
<commit_before><commit_msg>Add tests for User admin_forms<commit_after>
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from .. import admin_forms
from . factories import UserFactory
class UserCreationFormTest(TestCase):
def test_clean_email(self):
email = 'test@example.com'
form = admin_forms.UserCreationForm()
form.cleaned_data = {'email': email}
self.assertEqual(form.clean_email(), email)
def test_clean_duplicate_email(self):
user = UserFactory.create()
form = admin_forms.UserCreationForm()
form.cleaned_data = {'email': user.email}
with self.assertRaises(ValidationError):
form.clean_email()
def test_clean(self):
data = {'password1': 'pass123', 'password2': 'pass123'}
form = admin_forms.UserCreationForm()
form.cleaned_data = data
self.assertEqual(form.clean(), data)
def test_clean_mismatched(self):
data = {'password1': 'pass123', 'password2': 'pass321'}
form = admin_forms.UserCreationForm()
form.cleaned_data = data
with self.assertRaises(ValidationError):
form.clean()
class UserChangeFormTest(TestCase):
def test_clean_password(self):
password = 'pass123'
data = {'password': password}
user = UserFactory.build()
form = admin_forms.UserChangeForm(data, instance=user)
self.assertNotEqual(form.clean_password(), password)
|
Add tests for User admin_formsfrom django.core.exceptions import ValidationError
from django.test import TestCase
from .. import admin_forms
from . factories import UserFactory
class UserCreationFormTest(TestCase):
def test_clean_email(self):
email = 'test@example.com'
form = admin_forms.UserCreationForm()
form.cleaned_data = {'email': email}
self.assertEqual(form.clean_email(), email)
def test_clean_duplicate_email(self):
user = UserFactory.create()
form = admin_forms.UserCreationForm()
form.cleaned_data = {'email': user.email}
with self.assertRaises(ValidationError):
form.clean_email()
def test_clean(self):
data = {'password1': 'pass123', 'password2': 'pass123'}
form = admin_forms.UserCreationForm()
form.cleaned_data = data
self.assertEqual(form.clean(), data)
def test_clean_mismatched(self):
data = {'password1': 'pass123', 'password2': 'pass321'}
form = admin_forms.UserCreationForm()
form.cleaned_data = data
with self.assertRaises(ValidationError):
form.clean()
class UserChangeFormTest(TestCase):
def test_clean_password(self):
password = 'pass123'
data = {'password': password}
user = UserFactory.build()
form = admin_forms.UserChangeForm(data, instance=user)
self.assertNotEqual(form.clean_password(), password)
|
<commit_before><commit_msg>Add tests for User admin_forms<commit_after>from django.core.exceptions import ValidationError
from django.test import TestCase
from .. import admin_forms
from . factories import UserFactory
class UserCreationFormTest(TestCase):
def test_clean_email(self):
email = 'test@example.com'
form = admin_forms.UserCreationForm()
form.cleaned_data = {'email': email}
self.assertEqual(form.clean_email(), email)
def test_clean_duplicate_email(self):
user = UserFactory.create()
form = admin_forms.UserCreationForm()
form.cleaned_data = {'email': user.email}
with self.assertRaises(ValidationError):
form.clean_email()
def test_clean(self):
data = {'password1': 'pass123', 'password2': 'pass123'}
form = admin_forms.UserCreationForm()
form.cleaned_data = data
self.assertEqual(form.clean(), data)
def test_clean_mismatched(self):
data = {'password1': 'pass123', 'password2': 'pass321'}
form = admin_forms.UserCreationForm()
form.cleaned_data = data
with self.assertRaises(ValidationError):
form.clean()
class UserChangeFormTest(TestCase):
def test_clean_password(self):
password = 'pass123'
data = {'password': password}
user = UserFactory.build()
form = admin_forms.UserChangeForm(data, instance=user)
self.assertNotEqual(form.clean_password(), password)
|
|
bae50495106ce5c9cb39143a58e0e73a4e823d29
|
loader.py
|
loader.py
|
from __future__ import print_function, absolute_import, unicode_literals, division
from stackable.stack import Stack
from stackable.utils import StackablePickler
from stackable.network import StackableSocket, StackablePacketAssembler
from sys import modules
from types import ModuleType
class DispatchLoader(object):
def __init__(self, ip, port):
self.stack = Stack((StackableSocket(ip=ip, port=port),
StackablePacketAssembler(),
StackablePickler()))
self.cache = {}
def get_module(self, name):
if name in self.cache:
return self.cache[name]
else:
self.stack.write({'load': name})
o = self.stack.read()
if o['module'] != None:
self.cache[name] = o['module']
return o['module']
def find_module(self, fullname, path=None):
if self.get_module(fullname) != None:
self.path = path
return self
return None
def load_module(self, name):
if name in modules:
return modules[name]
m = ModuleType(name, name)
modules[name] = m
mod = self.get_module(name)
if mod == None:
raise ImportError("No such module")
exec mod in m.__dict__
return m
|
Implement DispatchLoader (metapath import hook)
|
Implement DispatchLoader (metapath import hook)
|
Python
|
mit
|
joushou/dispatch,joushou/dispatch
|
Implement DispatchLoader (metapath import hook)
|
from __future__ import print_function, absolute_import, unicode_literals, division
from stackable.stack import Stack
from stackable.utils import StackablePickler
from stackable.network import StackableSocket, StackablePacketAssembler
from sys import modules
from types import ModuleType
class DispatchLoader(object):
def __init__(self, ip, port):
self.stack = Stack((StackableSocket(ip=ip, port=port),
StackablePacketAssembler(),
StackablePickler()))
self.cache = {}
def get_module(self, name):
if name in self.cache:
return self.cache[name]
else:
self.stack.write({'load': name})
o = self.stack.read()
if o['module'] != None:
self.cache[name] = o['module']
return o['module']
def find_module(self, fullname, path=None):
if self.get_module(fullname) != None:
self.path = path
return self
return None
def load_module(self, name):
if name in modules:
return modules[name]
m = ModuleType(name, name)
modules[name] = m
mod = self.get_module(name)
if mod == None:
raise ImportError("No such module")
exec mod in m.__dict__
return m
|
<commit_before><commit_msg>Implement DispatchLoader (metapath import hook)<commit_after>
|
from __future__ import print_function, absolute_import, unicode_literals, division
from stackable.stack import Stack
from stackable.utils import StackablePickler
from stackable.network import StackableSocket, StackablePacketAssembler
from sys import modules
from types import ModuleType
class DispatchLoader(object):
def __init__(self, ip, port):
self.stack = Stack((StackableSocket(ip=ip, port=port),
StackablePacketAssembler(),
StackablePickler()))
self.cache = {}
def get_module(self, name):
if name in self.cache:
return self.cache[name]
else:
self.stack.write({'load': name})
o = self.stack.read()
if o['module'] != None:
self.cache[name] = o['module']
return o['module']
def find_module(self, fullname, path=None):
if self.get_module(fullname) != None:
self.path = path
return self
return None
def load_module(self, name):
if name in modules:
return modules[name]
m = ModuleType(name, name)
modules[name] = m
mod = self.get_module(name)
if mod == None:
raise ImportError("No such module")
exec mod in m.__dict__
return m
|
Implement DispatchLoader (metapath import hook)from __future__ import print_function, absolute_import, unicode_literals, division
from stackable.stack import Stack
from stackable.utils import StackablePickler
from stackable.network import StackableSocket, StackablePacketAssembler
from sys import modules
from types import ModuleType
class DispatchLoader(object):
def __init__(self, ip, port):
self.stack = Stack((StackableSocket(ip=ip, port=port),
StackablePacketAssembler(),
StackablePickler()))
self.cache = {}
def get_module(self, name):
if name in self.cache:
return self.cache[name]
else:
self.stack.write({'load': name})
o = self.stack.read()
if o['module'] != None:
self.cache[name] = o['module']
return o['module']
def find_module(self, fullname, path=None):
if self.get_module(fullname) != None:
self.path = path
return self
return None
def load_module(self, name):
if name in modules:
return modules[name]
m = ModuleType(name, name)
modules[name] = m
mod = self.get_module(name)
if mod == None:
raise ImportError("No such module")
exec mod in m.__dict__
return m
|
<commit_before><commit_msg>Implement DispatchLoader (metapath import hook)<commit_after>from __future__ import print_function, absolute_import, unicode_literals, division
from stackable.stack import Stack
from stackable.utils import StackablePickler
from stackable.network import StackableSocket, StackablePacketAssembler
from sys import modules
from types import ModuleType
class DispatchLoader(object):
def __init__(self, ip, port):
self.stack = Stack((StackableSocket(ip=ip, port=port),
StackablePacketAssembler(),
StackablePickler()))
self.cache = {}
def get_module(self, name):
if name in self.cache:
return self.cache[name]
else:
self.stack.write({'load': name})
o = self.stack.read()
if o['module'] != None:
self.cache[name] = o['module']
return o['module']
def find_module(self, fullname, path=None):
if self.get_module(fullname) != None:
self.path = path
return self
return None
def load_module(self, name):
if name in modules:
return modules[name]
m = ModuleType(name, name)
modules[name] = m
mod = self.get_module(name)
if mod == None:
raise ImportError("No such module")
exec mod in m.__dict__
return m
|
|
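The loader above is a PEP 302 meta_path hook (and Python 2 only, given the exec statement in load_module); registering it would look roughly like this, with the host, port and imported module name as placeholders:

import sys
from loader import DispatchLoader

sys.meta_path.append(DispatchLoader('127.0.0.1', 5000))
import some_remote_module      # now resolved through the dispatch server's module store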
5d795253180ef11117ae27447fa597fa15b40734
|
tests/test_graph.py
|
tests/test_graph.py
|
import os
from click.testing import CliRunner
from cli.script import cli
def get_graph_code():
return '''
from copy import deepcopy as dc
class StringCopier(object):
def __init__(self):
self.copied_strings = set()
def copy(self):
string1 = 'this'
string2 = dc(string1)
string1.add(string1)
return string2
class DoSomething(object):
def something(self):
copier = StringCopier()
copied_string = copier.copy()
'''
def test_produce_graph():
runner = CliRunner()
with runner.isolated_filesystem():
with open('code.py', 'w') as f:
f.write(get_graph_code())
runner.invoke(cli, ['code.py', '--output', 'code_output'])
assert 'code_output' in os.listdir(os.path.curdir)
assert 'code_output.pdf' in os.listdir(os.path.curdir)
def test_file_extension():
runner = CliRunner()
with runner.isolated_filesystem():
with open('code.py', 'w') as f:
f.write(get_graph_code())
runner.invoke(cli, ['code.py', '--output', 'code_output', '--output-format', 'png'])
assert 'code_output' in os.listdir(os.path.curdir)
assert 'code_output.png' in os.listdir(os.path.curdir)
assert 'code_output.pdf' not in os.listdir(os.path.curdir)
|
Add testing for graphing code
|
Add testing for graphing code
|
Python
|
mit
|
LaurEars/codegrapher
|
Add testing for graphing code
|
import os
from click.testing import CliRunner
from cli.script import cli
def get_graph_code():
return '''
from copy import deepcopy as dc
class StringCopier(object):
def __init__(self):
self.copied_strings = set()
def copy(self):
string1 = 'this'
string2 = dc(string1)
string1.add(string1)
return string2
class DoSomething(object):
def something(self):
copier = StringCopier()
copied_string = copier.copy()
'''
def test_produce_graph():
runner = CliRunner()
with runner.isolated_filesystem():
with open('code.py', 'w') as f:
f.write(get_graph_code())
runner.invoke(cli, ['code.py', '--output', 'code_output'])
assert 'code_output' in os.listdir(os.path.curdir)
assert 'code_output.pdf' in os.listdir(os.path.curdir)
def test_file_extension():
runner = CliRunner()
with runner.isolated_filesystem():
with open('code.py', 'w') as f:
f.write(get_graph_code())
runner.invoke(cli, ['code.py', '--output', 'code_output', '--output-format', 'png'])
assert 'code_output' in os.listdir(os.path.curdir)
assert 'code_output.png' in os.listdir(os.path.curdir)
assert 'code_output.pdf' not in os.listdir(os.path.curdir)
|
<commit_before><commit_msg>Add testing for graphing code<commit_after>
|
import os
from click.testing import CliRunner
from cli.script import cli
def get_graph_code():
return '''
from copy import deepcopy as dc
class StringCopier(object):
def __init__(self):
self.copied_strings = set()
def copy(self):
string1 = 'this'
string2 = dc(string1)
string1.add(string1)
return string2
class DoSomething(object):
def something(self):
copier = StringCopier()
copied_string = copier.copy()
'''
def test_produce_graph():
runner = CliRunner()
with runner.isolated_filesystem():
with open('code.py', 'w') as f:
f.write(get_graph_code())
runner.invoke(cli, ['code.py', '--output', 'code_output'])
assert 'code_output' in os.listdir(os.path.curdir)
assert 'code_output.pdf' in os.listdir(os.path.curdir)
def test_file_extension():
runner = CliRunner()
with runner.isolated_filesystem():
with open('code.py', 'w') as f:
f.write(get_graph_code())
runner.invoke(cli, ['code.py', '--output', 'code_output', '--output-format', 'png'])
assert 'code_output' in os.listdir(os.path.curdir)
assert 'code_output.png' in os.listdir(os.path.curdir)
assert 'code_output.pdf' not in os.listdir(os.path.curdir)
|
Add testing for graphing codeimport os
from click.testing import CliRunner
from cli.script import cli
def get_graph_code():
return '''
from copy import deepcopy as dc
class StringCopier(object):
def __init__(self):
self.copied_strings = set()
def copy(self):
string1 = 'this'
string2 = dc(string1)
string1.add(string1)
return string2
class DoSomething(object):
def something(self):
copier = StringCopier()
copied_string = copier.copy()
'''
def test_produce_graph():
runner = CliRunner()
with runner.isolated_filesystem():
with open('code.py', 'w') as f:
f.write(get_graph_code())
runner.invoke(cli, ['code.py', '--output', 'code_output'])
assert 'code_output' in os.listdir(os.path.curdir)
assert 'code_output.pdf' in os.listdir(os.path.curdir)
def test_file_extension():
runner = CliRunner()
with runner.isolated_filesystem():
with open('code.py', 'w') as f:
f.write(get_graph_code())
runner.invoke(cli, ['code.py', '--output', 'code_output', '--output-format', 'png'])
assert 'code_output' in os.listdir(os.path.curdir)
assert 'code_output.png' in os.listdir(os.path.curdir)
assert 'code_output.pdf' not in os.listdir(os.path.curdir)
|
<commit_before><commit_msg>Add testing for graphing code<commit_after>import os
from click.testing import CliRunner
from cli.script import cli
def get_graph_code():
return '''
from copy import deepcopy as dc
class StringCopier(object):
def __init__(self):
self.copied_strings = set()
def copy(self):
string1 = 'this'
string2 = dc(string1)
string1.add(string1)
return string2
class DoSomething(object):
def something(self):
copier = StringCopier()
copied_string = copier.copy()
'''
def test_produce_graph():
runner = CliRunner()
with runner.isolated_filesystem():
with open('code.py', 'w') as f:
f.write(get_graph_code())
runner.invoke(cli, ['code.py', '--output', 'code_output'])
assert 'code_output' in os.listdir(os.path.curdir)
assert 'code_output.pdf' in os.listdir(os.path.curdir)
def test_file_extension():
runner = CliRunner()
with runner.isolated_filesystem():
with open('code.py', 'w') as f:
f.write(get_graph_code())
runner.invoke(cli, ['code.py', '--output', 'code_output', '--output-format', 'png'])
assert 'code_output' in os.listdir(os.path.curdir)
assert 'code_output.png' in os.listdir(os.path.curdir)
assert 'code_output.pdf' not in os.listdir(os.path.curdir)
|
|
5ae58621bd766aeaa6f1838397b045039568887c
|
platesolve.py
|
platesolve.py
|
import babeldix
import sys
import operator
# Print solutions in order of increasing score
for plate in sys.argv[1:]:
solns = babeldix.Plates.get_solutions(plate)
for (soln,score) in sorted(solns.items(), key=operator.itemgetter(1)):
print '{0:s} {1:d} {2:s}'.format(plate,score,soln)
|
Add driver to find plate solutions
|
Add driver to find plate solutions
|
Python
|
mit
|
dkirkby/babeldix
|
Add driver to find plate solutions
|
import babeldix
import sys
import operator
# Print solutions in order of increasing score
for plate in sys.argv[1:]:
solns = babeldix.Plates.get_solutions(plate)
for (soln,score) in sorted(solns.items(), key=operator.itemgetter(1)):
print '{0:s} {1:d} {2:s}'.format(plate,score,soln)
|
<commit_before><commit_msg>Add driver to find plate solutions<commit_after>
|
import babeldix
import sys
import operator
# Print solutions in order of increasing score
for plate in sys.argv[1:]:
solns = babeldix.Plates.get_solutions(plate)
for (soln,score) in sorted(solns.items(), key=operator.itemgetter(1)):
print '{0:s} {1:d} {2:s}'.format(plate,score,soln)
|
Add driver to find plate solutionsimport babeldix
import sys
import operator
# Print solutions in order of increasing score
for plate in sys.argv[1:]:
solns = babeldix.Plates.get_solutions(plate)
for (soln,score) in sorted(solns.items(), key=operator.itemgetter(1)):
print '{0:s} {1:d} {2:s}'.format(plate,score,soln)
|
<commit_before><commit_msg>Add driver to find plate solutions<commit_after>import babeldix
import sys
import operator
# Print solutions in order of increasing score
for plate in sys.argv[1:]:
solns = babeldix.Plates.get_solutions(plate)
for (soln,score) in sorted(solns.items(), key=operator.itemgetter(1)):
print '{0:s} {1:d} {2:s}'.format(plate,score,soln)
|
|
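The driver above is Python 2 (print statement) and hinges on sorting a dict of solutions by score; the same idiom in isolation, with made-up data:

import operator

solns = {'GOOD DAY': 7, 'OK': 3, 'GREAT': 5}
for soln, score in sorted(solns.items(), key=operator.itemgetter(1)):
    print('%s -> %d' % (soln, score))   # prints OK, GREAT, GOOD DAY in ascending score order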
c1bfe92878edc3f9598a6d97046775cb8d9b0aa0
|
depot/migrations/0009_auto_20170330_1342.py
|
depot/migrations/0009_auto_20170330_1342.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 13:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('depot', '0008_auto_20170330_0855'),
]
operations = [
migrations.AlterField(
model_name='item',
name='visibility',
field=models.CharField(choices=[('1', 'public'), ('2', 'private'), ('3', 'deleted')], max_length=1),
),
]
|
Make migration for item-visibility change
|
Make migration for item-visibility change
|
Python
|
agpl-3.0
|
verleihtool/verleihtool,verleihtool/verleihtool,verleihtool/verleihtool,verleihtool/verleihtool
|
Make migration for item-visibility change
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 13:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('depot', '0008_auto_20170330_0855'),
]
operations = [
migrations.AlterField(
model_name='item',
name='visibility',
field=models.CharField(choices=[('1', 'public'), ('2', 'private'), ('3', 'deleted')], max_length=1),
),
]
|
<commit_before><commit_msg>Make migration for item-visibility change<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 13:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('depot', '0008_auto_20170330_0855'),
]
operations = [
migrations.AlterField(
model_name='item',
name='visibility',
field=models.CharField(choices=[('1', 'public'), ('2', 'private'), ('3', 'deleted')], max_length=1),
),
]
|
Make migration for item-visibility change# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 13:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('depot', '0008_auto_20170330_0855'),
]
operations = [
migrations.AlterField(
model_name='item',
name='visibility',
field=models.CharField(choices=[('1', 'public'), ('2', 'private'), ('3', 'deleted')], max_length=1),
),
]
|
<commit_before><commit_msg>Make migration for item-visibility change<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 13:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('depot', '0008_auto_20170330_0855'),
]
operations = [
migrations.AlterField(
model_name='item',
name='visibility',
field=models.CharField(choices=[('1', 'public'), ('2', 'private'), ('3', 'deleted')], max_length=1),
),
]
|
|
0e53f398bf2cf885393865ec1f899308bb56625b
|
examples/create_a_view_low_level.py
|
examples/create_a_view_low_level.py
|
"""
A low level example:
This is how JenkinsAPI creates views
"""
import requests
import json
url = 'http://localhost:8080/newView'
str_view_name = "ddsfddfd"
params = {}# {'name': str_view_name}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
data = {
"mode": "hudson.model.ListView",
#"Submit": "OK",
"name": str_view_name
}
# Try 1
result = requests.post(url, params=params, data={'json':json.dumps(data)}, headers=headers)
print result.text.encode('UTF-8')
|
Add a low-level example for creating views.
|
Add a low-level example for creating views.
|
Python
|
mit
|
mistermocha/jenkinsapi,imsardine/jenkinsapi,mistermocha/jenkinsapi,JohnLZeller/jenkinsapi,JohnLZeller/jenkinsapi,aerickson/jenkinsapi,zaro0508/jenkinsapi,imsardine/jenkinsapi,zaro0508/jenkinsapi,jduan/jenkinsapi,domenkozar/jenkinsapi,mistermocha/jenkinsapi,jduan/jenkinsapi,salimfadhley/jenkinsapi,salimfadhley/jenkinsapi,aerickson/jenkinsapi,JohnLZeller/jenkinsapi,domenkozar/jenkinsapi,imsardine/jenkinsapi,zaro0508/jenkinsapi
|
Add a low-level example for creating views.
|
"""
A low level example:
This is how JenkinsAPI creates views
"""
import requests
import json
url = 'http://localhost:8080/newView'
str_view_name = "ddsfddfd"
params = {}# {'name': str_view_name}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
data = {
"mode": "hudson.model.ListView",
#"Submit": "OK",
"name": str_view_name
}
# Try 1
result = requests.post(url, params=params, data={'json':json.dumps(data)}, headers=headers)
print result.text.encode('UTF-8')
|
<commit_before><commit_msg>Add a low-level example for creating views.<commit_after>
|
"""
A low level example:
This is how JenkinsAPI creates views
"""
import requests
import json
url = 'http://localhost:8080/newView'
str_view_name = "ddsfddfd"
params = {}# {'name': str_view_name}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
data = {
"mode": "hudson.model.ListView",
#"Submit": "OK",
"name": str_view_name
}
# Try 1
result = requests.post(url, params=params, data={'json':json.dumps(data)}, headers=headers)
print result.text.encode('UTF-8')
|
Add a low-level example for creating views."""
A low level example:
This is how JenkinsAPI creates views
"""
import requests
import json
url = 'http://localhost:8080/newView'
str_view_name = "ddsfddfd"
params = {}# {'name': str_view_name}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
data = {
"mode": "hudson.model.ListView",
#"Submit": "OK",
"name": str_view_name
}
# Try 1
result = requests.post(url, params=params, data={'json':json.dumps(data)}, headers=headers)
print result.text.encode('UTF-8')
|
<commit_before><commit_msg>Add a low-level example for creating views.<commit_after>"""
A low level example:
This is how JenkinsAPI creates views
"""
import requests
import json
url = 'http://localhost:8080/newView'
str_view_name = "ddsfddfd"
params = {}# {'name': str_view_name}
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
data = {
"mode": "hudson.model.ListView",
#"Submit": "OK",
"name": str_view_name
}
# Try 1
result = requests.post(url, params=params, data={'json':json.dumps(data)}, headers=headers)
print result.text.encode('UTF-8')
|
|
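A follow-up check on the response is often useful after the POST above; this continues the snippet's variables (result, str_view_name) and only adds a standard requests status check, so treat it as a sketch:

result.raise_for_status()      # raises requests.HTTPError on 4xx/5xx responses
print('HTTP %d - view %s submitted' % (result.status_code, str_view_name))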
4c73cad398d5dac85b264187f709a860f356b311
|
smipyping/_mysqldbmixin.py
|
smipyping/_mysqldbmixin.py
|
#!/usr/bin/env python
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
from mysql.connector import MySQLConnection
class MySQLDBMixin(object):
"""
Provides some common methods to mixin in with the MySQL...Tables
classes
"""
def connectdb(self, db_dict, verbose):
"""Connect the db"""
try:
connection = MySQLConnection(host=db_dict['host'],
database=db_dict['database'],
user=db_dict['user'],
password=db_dict['password'])
if connection.is_connected():
self.connection = connection
if verbose:
print('sql db connection established. host %s, db %s' %
(db_dict['host'], db_dict['database']))
else:
print('SQL database connection failed. host %s, db %s' %
(db_dict['host'], db_dict['database']))
raise ValueError('Connection to database failed')
except Exception as ex:
raise ValueError('Could not connect to sql database %r. '
' Exception: %r'
% (db_dict, ex))
def _load_table(self):
"""
Load the internal dictionary from the database based on the
fields definition
"""
try:
cursor = self.connection.cursor(dictionary=True)
fields = ', '.join(self.fields)
sql = 'SELECT %s FROM %s' % (fields, self.table_name)
cursor.execute(sql)
rows = cursor.fetchall()
for row in rows:
key = row[self.key_field]
self.data_dict[key] = row
except Exception as ex:
raise ValueError('Error: setup sql based targets table %r. '
'Exception: %r'
% (self.db_dict, ex))
|
Add new file with mixin for mysql
|
Add new file with mixin for mysql
|
Python
|
mit
|
KSchopmeyer/smipyping,KSchopmeyer/smipyping,KSchopmeyer/smipyping,KSchopmeyer/smipyping,KSchopmeyer/smipyping
|
Add new file with mixin for mysql
|
#!/usr/bin/env python
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
from mysql.connector import MySQLConnection
class MySQLDBMixin(object):
"""
Provides some common methods to mixin in with the MySQL...Tables
classes
"""
def connectdb(self, db_dict, verbose):
"""Connect the db"""
try:
connection = MySQLConnection(host=db_dict['host'],
database=db_dict['database'],
user=db_dict['user'],
password=db_dict['password'])
if connection.is_connected():
self.connection = connection
if verbose:
print('sql db connection established. host %s, db %s' %
(db_dict['host'], db_dict['database']))
else:
print('SQL database connection failed. host %s, db %s' %
(db_dict['host'], db_dict['database']))
raise ValueError('Connection to database failed')
except Exception as ex:
raise ValueError('Could not connect to sql database %r. '
' Exception: %r'
% (db_dict, ex))
def _load_table(self):
"""
Load the internal dictionary from the database based on the
fields definition
"""
try:
cursor = self.connection.cursor(dictionary=True)
fields = ', '.join(self.fields)
sql = 'SELECT %s FROM %s' % (fields, self.table_name)
cursor.execute(sql)
rows = cursor.fetchall()
for row in rows:
key = row[self.key_field]
self.data_dict[key] = row
except Exception as ex:
raise ValueError('Error: setup sql based targets table %r. '
'Exception: %r'
% (self.db_dict, ex))
|
<commit_before><commit_msg>Add new file with mixin for mysql<commit_after>
|
#!/usr/bin/env python
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
from mysql.connector import MySQLConnection
class MySQLDBMixin(object):
"""
Provides some common methods to mixin in with the MySQL...Tables
classes
"""
def connectdb(self, db_dict, verbose):
"""Connect the db"""
try:
connection = MySQLConnection(host=db_dict['host'],
database=db_dict['database'],
user=db_dict['user'],
password=db_dict['password'])
if connection.is_connected():
self.connection = connection
if verbose:
print('sql db connection established. host %s, db %s' %
(db_dict['host'], db_dict['database']))
else:
print('SQL database connection failed. host %s, db %s' %
(db_dict['host'], db_dict['database']))
raise ValueError('Connection to database failed')
except Exception as ex:
raise ValueError('Could not connect to sql database %r. '
' Exception: %r'
% (db_dict, ex))
def _load_table(self):
"""
Load the internal dictionary from the database based on the
fields definition
"""
try:
cursor = self.connection.cursor(dictionary=True)
fields = ', '.join(self.fields)
sql = 'SELECT %s FROM %s' % (fields, self.table_name)
cursor.execute(sql)
rows = cursor.fetchall()
for row in rows:
key = row[self.key_field]
self.data_dict[key] = row
except Exception as ex:
raise ValueError('Error: setup sql based targets table %r. '
'Exception: %r'
% (self.db_dict, ex))
|
Add new file with mixin for mysql#!/usr/bin/env python
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
from mysql.connector import MySQLConnection
class MySQLDBMixin(object):
"""
Provides some common methods to mixin in with the MySQL...Tables
classes
"""
def connectdb(self, db_dict, verbose):
"""Connect the db"""
try:
connection = MySQLConnection(host=db_dict['host'],
database=db_dict['database'],
user=db_dict['user'],
password=db_dict['password'])
if connection.is_connected():
self.connection = connection
if verbose:
print('sql db connection established. host %s, db %s' %
(db_dict['host'], db_dict['database']))
else:
print('SQL database connection failed. host %s, db %s' %
(db_dict['host'], db_dict['database']))
raise ValueError('Connection to database failed')
except Exception as ex:
raise ValueError('Could not connect to sql database %r. '
' Exception: %r'
% (db_dict, ex))
def _load_table(self):
"""
Load the internal dictionary from the database based on the
fields definition
"""
try:
cursor = self.connection.cursor(dictionary=True)
fields = ', '.join(self.fields)
sql = 'SELECT %s FROM %s' % (fields, self.table_name)
cursor.execute(sql)
rows = cursor.fetchall()
for row in rows:
key = row[self.key_field]
self.data_dict[key] = row
except Exception as ex:
raise ValueError('Error: setup sql based targets table %r. '
'Exception: %r'
% (self.db_dict, ex))
|
<commit_before><commit_msg>Add new file with mixin for mysql<commit_after>#!/usr/bin/env python
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
from mysql.connector import MySQLConnection
class MySQLDBMixin(object):
"""
Provides some common methods to mixin in with the MySQL...Tables
classes
"""
def connectdb(self, db_dict, verbose):
"""Connect the db"""
try:
connection = MySQLConnection(host=db_dict['host'],
database=db_dict['database'],
user=db_dict['user'],
password=db_dict['password'])
if connection.is_connected():
self.connection = connection
if verbose:
print('sql db connection established. host %s, db %s' %
(db_dict['host'], db_dict['database']))
else:
print('SQL database connection failed. host %s, db %s' %
(db_dict['host'], db_dict['database']))
raise ValueError('Connection to database failed')
except Exception as ex:
raise ValueError('Could not connect to sql database %r. '
' Exception: %r'
% (db_dict, ex))
def _load_table(self):
"""
Load the internal dictionary from the database based on the
fields definition
"""
try:
cursor = self.connection.cursor(dictionary=True)
fields = ', '.join(self.fields)
sql = 'SELECT %s FROM %s' % (fields, self.table_name)
cursor.execute(sql)
rows = cursor.fetchall()
for row in rows:
key = row[self.key_field]
self.data_dict[key] = row
except Exception as ex:
raise ValueError('Error: setup sql based targets table %r. '
'Exception: %r'
% (self.db_dict, ex))
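A hypothetical sketch of how such a mixin is typically consumed follows; the table name, key field, and column list are illustrative stand-ins, not taken from the smipyping code base.
class ExampleTargetsTable(MySQLDBMixin):
    """Minimal consumer of the mixin: set the attributes the mixin expects,
    then connect and load."""
    def __init__(self, db_dict, verbose=False):
        self.db_dict = db_dict
        self.data_dict = {}
        self.table_name = 'Targets'       # assumed table name
        self.key_field = 'TargetID'       # assumed key column
        self.fields = ['TargetID', 'IPAddress', 'CompanyName']  # assumed columns
        self.connectdb(db_dict, verbose)
        self._load_table()

# Example usage (placeholder credentials):
# table = ExampleTargetsTable({'host': 'localhost', 'database': 'SMIStatus',
#                              'user': 'smi', 'password': 'secret'}, verbose=True)
# print(sorted(table.data_dict.keys()))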
|
|
c57c672aae98fb5b280f70b68ac27fc2d94a243f
|
tests/estimator/classifier/RandomForestClassifier/RandomForestClassifierGoTest.py
|
tests/estimator/classifier/RandomForestClassifier/RandomForestClassifierGoTest.py
|
# -*- coding: utf-8 -*-
from unittest import TestCase
from sklearn.ensemble import RandomForestClassifier
from tests.estimator.classifier.Classifier import Classifier
from tests.language.Go import Go
class RandomForestClassifierGoTest(Go, Classifier, TestCase):
def setUp(self):
super(RandomForestClassifierGoTest, self).setUp()
self.estimator = RandomForestClassifier(n_estimators=100,
random_state=0)
def tearDown(self):
super(RandomForestClassifierGoTest, self).tearDown()
|
Add test class to cover the RandomForestClassifier in Go
|
Add test class to cover the RandomForestClassifier in Go
|
Python
|
bsd-3-clause
|
nok/sklearn-porter
|
Add test class to cover the RandomForestClassifier in Go
|
# -*- coding: utf-8 -*-
from unittest import TestCase
from sklearn.ensemble import RandomForestClassifier
from tests.estimator.classifier.Classifier import Classifier
from tests.language.Go import Go
class RandomForestClassifierGoTest(Go, Classifier, TestCase):
def setUp(self):
super(RandomForestClassifierGoTest, self).setUp()
self.estimator = RandomForestClassifier(n_estimators=100,
random_state=0)
def tearDown(self):
super(RandomForestClassifierGoTest, self).tearDown()
|
<commit_before><commit_msg>Add test class to cover the RandomForestClassifier in Go<commit_after>
|
# -*- coding: utf-8 -*-
from unittest import TestCase
from sklearn.ensemble import RandomForestClassifier
from tests.estimator.classifier.Classifier import Classifier
from tests.language.Go import Go
class RandomForestClassifierGoTest(Go, Classifier, TestCase):
def setUp(self):
super(RandomForestClassifierGoTest, self).setUp()
self.estimator = RandomForestClassifier(n_estimators=100,
random_state=0)
def tearDown(self):
super(RandomForestClassifierGoTest, self).tearDown()
|
Add test class to cover the RandomForestClassifier in Go# -*- coding: utf-8 -*-
from unittest import TestCase
from sklearn.ensemble import RandomForestClassifier
from tests.estimator.classifier.Classifier import Classifier
from tests.language.Go import Go
class RandomForestClassifierGoTest(Go, Classifier, TestCase):
def setUp(self):
super(RandomForestClassifierGoTest, self).setUp()
self.estimator = RandomForestClassifier(n_estimators=100,
random_state=0)
def tearDown(self):
super(RandomForestClassifierGoTest, self).tearDown()
|
<commit_before><commit_msg>Add test class to cover the RandomForestClassifier in Go<commit_after># -*- coding: utf-8 -*-
from unittest import TestCase
from sklearn.ensemble import RandomForestClassifier
from tests.estimator.classifier.Classifier import Classifier
from tests.language.Go import Go
class RandomForestClassifierGoTest(Go, Classifier, TestCase):
def setUp(self):
super(RandomForestClassifierGoTest, self).setUp()
self.estimator = RandomForestClassifier(n_estimators=100,
random_state=0)
def tearDown(self):
super(RandomForestClassifierGoTest, self).tearDown()
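For context, a standalone sketch of the estimator this test wires up is shown below; the iris dataset is an arbitrary choice for illustration and is not how the sklearn-porter test harness feeds data.
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier

X, y = load_iris(return_X_y=True)
clf = RandomForestClassifier(n_estimators=100, random_state=0)  # same settings as the test
clf.fit(X, y)
print(clf.predict(X[:3]))  # predictions for the first three samples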
|
|
ab99f855f708dec213c9eea1489643c01526e0b0
|
lib/bridgedb/test/test_parse_versions.py
|
lib/bridgedb/test/test_parse_versions.py
|
# -*- coding: utf-8 -*-
#_____________________________________________________________________________
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2014, The Tor Project, Inc.
# (c) 2014, Isis Lovecruft
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
"""Unittests for :mod:`bridgedb.parse.versions`."""
from __future__ import print_function
from twisted.trial import unittest
from bridgedb.parse import versions
class ParseVersionTests(unittest.TestCase):
"""Unitests for :class:`bridgedb.parse.versions.Version`."""
def test_Version_with_bad_delimiter(self):
"""Test parsing a version number which uses '-' as a delimiter."""
self.assertRaises(versions.InvalidVersionStringFormat,
versions.Version, '2-6-0', package='tor')
def test_Version_really_long_version_string(self):
"""Parsing a version number which is way too long should raise
an IndexError which is ignored.
"""
v = versions.Version('2.6.0.0.beta', package='tor')
self.assertEqual(v.prerelease, 'beta')
self.assertEqual(v.major, 6)
def test_Version_string(self):
"""Test converting a valid Version object into string form."""
v = versions.Version('0.2.5.4', package='tor')
self.assertEqual(v.base(), '0.2.5.4')
|
Add unittests for bridgedb.parse.versions module.
|
Add unittests for bridgedb.parse.versions module.
|
Python
|
bsd-3-clause
|
pagea/bridgedb,pagea/bridgedb
|
Add unittests for bridgedb.parse.versions module.
|
# -*- coding: utf-8 -*-
#_____________________________________________________________________________
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2014, The Tor Project, Inc.
# (c) 2014, Isis Lovecruft
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
"""Unittests for :mod:`bridgedb.parse.versions`."""
from __future__ import print_function
from twisted.trial import unittest
from bridgedb.parse import versions
class ParseVersionTests(unittest.TestCase):
"""Unitests for :class:`bridgedb.parse.versions.Version`."""
def test_Version_with_bad_delimiter(self):
"""Test parsing a version number which uses '-' as a delimiter."""
self.assertRaises(versions.InvalidVersionStringFormat,
versions.Version, '2-6-0', package='tor')
def test_Version_really_long_version_string(self):
"""Parsing a version number which is way too long should raise
an IndexError which is ignored.
"""
v = versions.Version('2.6.0.0.beta', package='tor')
self.assertEqual(v.prerelease, 'beta')
self.assertEqual(v.major, 6)
def test_Version_string(self):
"""Test converting a valid Version object into string form."""
v = versions.Version('0.2.5.4', package='tor')
self.assertEqual(v.base(), '0.2.5.4')
|
<commit_before><commit_msg>Add unittests for bridgedb.parse.versions module.<commit_after>
|
# -*- coding: utf-8 -*-
#_____________________________________________________________________________
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2014, The Tor Project, Inc.
# (c) 2014, Isis Lovecruft
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
"""Unittests for :mod:`bridgedb.parse.versions`."""
from __future__ import print_function
from twisted.trial import unittest
from bridgedb.parse import versions
class ParseVersionTests(unittest.TestCase):
"""Unitests for :class:`bridgedb.parse.versions.Version`."""
def test_Version_with_bad_delimiter(self):
"""Test parsing a version number which uses '-' as a delimiter."""
self.assertRaises(versions.InvalidVersionStringFormat,
versions.Version, '2-6-0', package='tor')
def test_Version_really_long_version_string(self):
"""Parsing a version number which is way too long should raise
an IndexError which is ignored.
"""
v = versions.Version('2.6.0.0.beta', package='tor')
self.assertEqual(v.prerelease, 'beta')
self.assertEqual(v.major, 6)
def test_Version_string(self):
"""Test converting a valid Version object into string form."""
v = versions.Version('0.2.5.4', package='tor')
self.assertEqual(v.base(), '0.2.5.4')
|
Add unittests for bridgedb.parse.versions module.# -*- coding: utf-8 -*-
#_____________________________________________________________________________
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2014, The Tor Project, Inc.
# (c) 2014, Isis Lovecruft
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
"""Unittests for :mod:`bridgedb.parse.versions`."""
from __future__ import print_function
from twisted.trial import unittest
from bridgedb.parse import versions
class ParseVersionTests(unittest.TestCase):
"""Unitests for :class:`bridgedb.parse.versions.Version`."""
def test_Version_with_bad_delimiter(self):
"""Test parsing a version number which uses '-' as a delimiter."""
self.assertRaises(versions.InvalidVersionStringFormat,
versions.Version, '2-6-0', package='tor')
def test_Version_really_long_version_string(self):
"""Parsing a version number which is way too long should raise
an IndexError which is ignored.
"""
v = versions.Version('2.6.0.0.beta', package='tor')
self.assertEqual(v.prerelease, 'beta')
self.assertEqual(v.major, 6)
def test_Version_string(self):
"""Test converting a valid Version object into string form."""
v = versions.Version('0.2.5.4', package='tor')
self.assertEqual(v.base(), '0.2.5.4')
|
<commit_before><commit_msg>Add unittests for bridgedb.parse.versions module.<commit_after># -*- coding: utf-8 -*-
#_____________________________________________________________________________
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2014, The Tor Project, Inc.
# (c) 2014, Isis Lovecruft
# :license: see LICENSE for licensing information
#_____________________________________________________________________________
"""Unittests for :mod:`bridgedb.parse.versions`."""
from __future__ import print_function
from twisted.trial import unittest
from bridgedb.parse import versions
class ParseVersionTests(unittest.TestCase):
"""Unitests for :class:`bridgedb.parse.versions.Version`."""
def test_Version_with_bad_delimiter(self):
"""Test parsing a version number which uses '-' as a delimiter."""
self.assertRaises(versions.InvalidVersionStringFormat,
versions.Version, '2-6-0', package='tor')
def test_Version_really_long_version_string(self):
"""Parsing a version number which is way too long should raise
an IndexError which is ignored.
"""
v = versions.Version('2.6.0.0.beta', package='tor')
self.assertEqual(v.prerelease, 'beta')
self.assertEqual(v.major, 6)
def test_Version_string(self):
"""Test converting a valid Version object into string form."""
v = versions.Version('0.2.5.4', package='tor')
self.assertEqual(v.base(), '0.2.5.4')
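A standalone sketch of exercising the same parser outside the test runner, assuming bridgedb is importable; the version string is illustrative.
from bridgedb.parse import versions

v = versions.Version('0.2.5.4', package='tor')
print(v.base())  # expected: '0.2.5.4', as asserted in test_Version_string above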
|
|
1a4db50c848a3e7bb1323ae9e6b26c884187c575
|
training/level-1-the-zen-of-python/dragon-warrior/fibonacci/rwharris-nd_fibonacci.py
|
training/level-1-the-zen-of-python/dragon-warrior/fibonacci/rwharris-nd_fibonacci.py
|
def even_fibonacci_sum(a:int,b:int,max:int) -> int:
temp = 0
sum = 0
while (b <= max):
if (b%2 == 0):
sum += b
temp = a + b
a = b
b = temp
print(sum)
even_fibonacci_sum(1,2,4000000)
|
Add my fibonacci sequence homework.
|
Add my fibonacci sequence homework.
|
Python
|
artistic-2.0
|
bigfatpanda-training/pandas-practical-python-primer,bigfatpanda-training/pandas-practical-python-primer
|
Add my fibonacci sequence homework.
|
def even_fibonacci_sum(a:int,b:int,max:int) -> int:
temp = 0
sum = 0
while (b <= max):
if (b%2 == 0):
sum += b
temp = a + b
a = b
b = temp
print(sum)
even_fibonacci_sum(1,2,4000000)
|
<commit_before><commit_msg>Add my fibonacci sequence homework.<commit_after>
|
def even_fibonacci_sum(a:int,b:int,max:int) -> int:
temp = 0
sum = 0
while (b <= max):
if (b%2 == 0):
sum += b
temp = a + b
a = b
b = temp
print(sum)
even_fibonacci_sum(1,2,4000000)
|
Add my fibonacci sequence homework.def even_fibonacci_sum(a:int,b:int,max:int) -> int:
temp = 0
sum = 0
while (b <= max):
if (b%2 == 0):
sum += b
temp = a + b
a = b
b = temp
print(sum)
even_fibonacci_sum(1,2,4000000)
|
<commit_before><commit_msg>Add my fibonacci sequence homework.<commit_after>def even_fibonacci_sum(a:int,b:int,max:int) -> int:
temp = 0
sum = 0
while (b <= max):
if (b%2 == 0):
sum += b
temp = a + b
a = b
b = temp
print(sum)
even_fibonacci_sum(1,2,4000000)
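A small variant sketch that returns the sum instead of printing it, so the result can be asserted directly; the expected value is the well-known Project Euler #2 answer for a 4,000,000 limit.
def even_fibonacci_sum_value(a: int, b: int, limit: int) -> int:
    total = 0
    while b <= limit:
        if b % 2 == 0:
            total += b
        a, b = b, a + b   # advance the Fibonacci pair
    return total

assert even_fibonacci_sum_value(1, 2, 4000000) == 4613732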
|
|
f14c483283984b793f1209255e059d7b9deb414c
|
migrations/versions/8081a5906af_.py
|
migrations/versions/8081a5906af_.py
|
"""empty message
Revision ID: 8081a5906af
Revises: 575d8824e34c
Create Date: 2015-08-25 18:04:56.738898
"""
# revision identifiers, used by Alembic.
revision = '8081a5906af'
down_revision = '575d8824e34c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('organization', sa.Column('member_count', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('organization', 'member_count')
### end Alembic commands ###
|
Add in the db migration
|
Add in the db migration
|
Python
|
mit
|
MaxPresman/cfapi,smalley/cfapi,smalley/cfapi,codeforamerica/cfapi,codeforamerica/cfapi,MaxPresman/cfapi,MaxPresman/cfapi,smalley/cfapi
|
Add in the db migration
|
"""empty message
Revision ID: 8081a5906af
Revises: 575d8824e34c
Create Date: 2015-08-25 18:04:56.738898
"""
# revision identifiers, used by Alembic.
revision = '8081a5906af'
down_revision = '575d8824e34c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('organization', sa.Column('member_count', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('organization', 'member_count')
### end Alembic commands ###
|
<commit_before><commit_msg>Add in the db migration<commit_after>
|
"""empty message
Revision ID: 8081a5906af
Revises: 575d8824e34c
Create Date: 2015-08-25 18:04:56.738898
"""
# revision identifiers, used by Alembic.
revision = '8081a5906af'
down_revision = '575d8824e34c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('organization', sa.Column('member_count', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('organization', 'member_count')
### end Alembic commands ###
|
Add in the db migration"""empty message
Revision ID: 8081a5906af
Revises: 575d8824e34c
Create Date: 2015-08-25 18:04:56.738898
"""
# revision identifiers, used by Alembic.
revision = '8081a5906af'
down_revision = '575d8824e34c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('organization', sa.Column('member_count', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('organization', 'member_count')
### end Alembic commands ###
|
<commit_before><commit_msg>Add in the db migration<commit_after>"""empty message
Revision ID: 8081a5906af
Revises: 575d8824e34c
Create Date: 2015-08-25 18:04:56.738898
"""
# revision identifiers, used by Alembic.
revision = '8081a5906af'
down_revision = '575d8824e34c'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('organization', sa.Column('member_count', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('organization', 'member_count')
### end Alembic commands ###
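For orientation, a minimal sketch of the model-side column this migration presumably backs; the Organization model below is a stand-in, not the actual cfapi model definition.
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Organization(Base):
    __tablename__ = 'organization'
    id = sa.Column(sa.Integer(), primary_key=True)          # placeholder primary key
    member_count = sa.Column(sa.Integer(), nullable=True)   # column added by the migration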
|
|
a4bc16a375dc30e37034993bd07d3014f3b936e1
|
migrations/versions/201610041721_8b5ab7da2d5_fix_corrupt_abstract_field_data.py
|
migrations/versions/201610041721_8b5ab7da2d5_fix_corrupt_abstract_field_data.py
|
"""Fix corrupt abstract field data
Revision ID: 8b5ab7da2d5
Revises: 52d970fb6a74
Create Date: 2016-10-04 17:21:19.186125
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '8b5ab7da2d5'
down_revision = '52d970fb6a74'
def upgrade():
# We don't want any dicts in abstract field values...
# Single choice fields with no value should be `null`, text fields should be empty
op.execute('''
UPDATE event_abstracts.abstract_field_values fv
SET data = 'null'::json
FROM events.contribution_fields cf
WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'single_choice';
UPDATE event_abstracts.abstract_field_values fv
SET data = '""'::json
FROM events.contribution_fields cf
WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'text';
''')
def downgrade():
pass
|
Fix corrupt abstract field data
|
Fix corrupt abstract field data
|
Python
|
mit
|
pferreir/indico,mic4ael/indico,ThiefMaster/indico,mvidalgarcia/indico,DirkHoffmann/indico,indico/indico,DirkHoffmann/indico,mvidalgarcia/indico,indico/indico,pferreir/indico,indico/indico,ThiefMaster/indico,OmeGak/indico,DirkHoffmann/indico,indico/indico,mic4ael/indico,mic4ael/indico,mic4ael/indico,ThiefMaster/indico,OmeGak/indico,DirkHoffmann/indico,pferreir/indico,OmeGak/indico,mvidalgarcia/indico,OmeGak/indico,ThiefMaster/indico,mvidalgarcia/indico,pferreir/indico
|
Fix corrupt abstract field data
|
"""Fix corrupt abstract field data
Revision ID: 8b5ab7da2d5
Revises: 52d970fb6a74
Create Date: 2016-10-04 17:21:19.186125
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '8b5ab7da2d5'
down_revision = '52d970fb6a74'
def upgrade():
# We don't want any dicts in abstract field values...
# Single choice fields with no value should be `null`, text fields should be empty
op.execute('''
UPDATE event_abstracts.abstract_field_values fv
SET data = 'null'::json
FROM events.contribution_fields cf
WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'single_choice';
UPDATE event_abstracts.abstract_field_values fv
SET data = '""'::json
FROM events.contribution_fields cf
WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'text';
''')
def downgrade():
pass
|
<commit_before><commit_msg>Fix corrupt abstract field data<commit_after>
|
"""Fix corrupt abstract field data
Revision ID: 8b5ab7da2d5
Revises: 52d970fb6a74
Create Date: 2016-10-04 17:21:19.186125
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '8b5ab7da2d5'
down_revision = '52d970fb6a74'
def upgrade():
# We don't want any dicts in abstract field values...
# Single choice fields with no value should be `null`, text fields should be empty
op.execute('''
UPDATE event_abstracts.abstract_field_values fv
SET data = 'null'::json
FROM events.contribution_fields cf
WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'single_choice';
UPDATE event_abstracts.abstract_field_values fv
SET data = '""'::json
FROM events.contribution_fields cf
WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'text';
''')
def downgrade():
pass
|
Fix corrupt abstract field data"""Fix corrupt abstract field data
Revision ID: 8b5ab7da2d5
Revises: 52d970fb6a74
Create Date: 2016-10-04 17:21:19.186125
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '8b5ab7da2d5'
down_revision = '52d970fb6a74'
def upgrade():
# We don't want any dicts in abstract field values...
# Single choice fields with no value should be `null`, text fields should be empty
op.execute('''
UPDATE event_abstracts.abstract_field_values fv
SET data = 'null'::json
FROM events.contribution_fields cf
WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'single_choice';
UPDATE event_abstracts.abstract_field_values fv
SET data = '""'::json
FROM events.contribution_fields cf
WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'text';
''')
def downgrade():
pass
|
<commit_before><commit_msg>Fix corrupt abstract field data<commit_after>"""Fix corrupt abstract field data
Revision ID: 8b5ab7da2d5
Revises: 52d970fb6a74
Create Date: 2016-10-04 17:21:19.186125
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '8b5ab7da2d5'
down_revision = '52d970fb6a74'
def upgrade():
# We don't want any dicts in abstract field values...
# Single choice fields with no value should be `null`, text fields should be empty
op.execute('''
UPDATE event_abstracts.abstract_field_values fv
SET data = 'null'::json
FROM events.contribution_fields cf
WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'single_choice';
UPDATE event_abstracts.abstract_field_values fv
SET data = '""'::json
FROM events.contribution_fields cf
WHERE data::jsonb = '{}'::jsonb AND cf.id = fv.contribution_field_id AND cf.field_type = 'text';
''')
def downgrade():
pass
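A pre-check sketch, useful for counting how many rows still carry an empty-dict value before the fix runs; the DSN is a placeholder and direct database access is assumed.
import sqlalchemy as sa

engine = sa.create_engine('postgresql:///indico')  # placeholder DSN
with engine.connect() as conn:
    count = conn.execute(sa.text(
        "SELECT count(*) FROM event_abstracts.abstract_field_values "
        "WHERE data::jsonb = '{}'::jsonb"
    )).scalar()
    print(count, 'field values still hold an empty dict')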
|
|
ef745ed086ebd8e77e158c89b577c77296630320
|
Python/118_Pascals_Triangle.py
|
Python/118_Pascals_Triangle.py
|
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
res = [[1],[1,1]]
if numRows == 0:
return []
elif numRows == 1:
return [[1]]
else:
old = [1,1]
for i in xrange(numRows-2):
temp = [1]
for j in xrange(len(old)-1):
temp.append(old[j]+old[j+1])
temp.append(1)
res.append(temp)
old = temp
return res
if __name__ == '__main__':
print Solution().generate(6)
|
Add solution for 118 pascals triangle
|
Add solution for 118 pascals triangle
|
Python
|
mit
|
comicxmz001/LeetCode,comicxmz001/LeetCode
|
Add solution for 118 pascals triangle
|
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
res = [[1],[1,1]]
if numRows == 0:
return []
elif numRows == 1:
return [[1]]
else:
old = [1,1]
for i in xrange(numRows-2):
temp = [1]
for j in xrange(len(old)-1):
temp.append(old[j]+old[j+1])
temp.append(1)
res.append(temp)
old = temp
return res
if __name__ == '__main__':
print Solution().generate(6)
|
<commit_before><commit_msg>Add solution for 118 pascals triangle<commit_after>
|
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
res = [[1],[1,1]]
if numRows == 0:
return []
elif numRows == 1:
return [[1]]
else:
old = [1,1]
for i in xrange(numRows-2):
temp = [1]
for j in xrange(len(old)-1):
temp.append(old[j]+old[j+1])
temp.append(1)
res.append(temp)
old = temp
return res
if __name__ == '__main__':
print Solution().generate(6)
|
Add solution for 118 pascals triangleclass Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
res = [[1],[1,1]]
if numRows == 0:
return []
elif numRows == 1:
return [[1]]
else:
old = [1,1]
for i in xrange(numRows-2):
temp = [1]
for j in xrange(len(old)-1):
temp.append(old[j]+old[j+1])
temp.append(1)
res.append(temp)
old = temp
return res
if __name__ == '__main__':
print Solution().generate(6)
|
<commit_before><commit_msg>Add solution for 118 pascals triangle<commit_after>class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
res = [[1],[1,1]]
if numRows == 0:
return []
elif numRows == 1:
return [[1]]
else:
old = [1,1]
for i in xrange(numRows-2):
temp = [1]
for j in xrange(len(old)-1):
temp.append(old[j]+old[j+1])
temp.append(1)
res.append(temp)
old = temp
return res
if __name__ == '__main__':
print Solution().generate(6)
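A behaviour-equivalent Python 3 sketch, kept separate so the original Python 2 submission above stays untouched.
def generate(num_rows):
    if num_rows == 0:
        return []
    rows = [[1]]
    for _ in range(num_rows - 1):
        prev = rows[-1]
        # each interior entry is the sum of the two entries above it
        rows.append([1] + [prev[j] + prev[j + 1] for j in range(len(prev) - 1)] + [1])
    return rows

print(generate(6))  # same output as Solution().generate(6) above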
|
|
a8b524318d7f9d4406193d610b2bb3ef8e56e147
|
examples/frameless_drag_region.py
|
examples/frameless_drag_region.py
|
import webview
'''
This example demonstrates a user-provided "drag region" to move a frameless window
around, whilst maintaining normal mouse down/move events elsewhere. This roughly
replicates `-webkit-drag-region`.
'''
html = '''
<head>
<style type="text/css">
.pywebview-drag-region {
width: 50px;
height: 50px;
margin-top: 50px;
margin-left: 50px;
background: orange;
}
</style>
</head>
<body>
<div class="pywebview-drag-region">Drag me!</div>
</body>
'''
if __name__ == '__main__':
window = webview.create_window(
'API example',
html=html,
frameless=True,
easy_drag=False,
)
webview.start()
|
Add frameless drag region example.
|
Add frameless drag region example.
|
Python
|
bsd-3-clause
|
r0x0r/pywebview,r0x0r/pywebview,r0x0r/pywebview,r0x0r/pywebview,r0x0r/pywebview
|
Add frameless drag region example.
|
import webview
'''
This example demonstrates a user-provided "drag region" to move a frameless window
around, whilst maintaining normal mouse down/move events elsewhere. This roughly
replicates `-webkit-drag-region`.
'''
html = '''
<head>
<style type="text/css">
.pywebview-drag-region {
width: 50px;
height: 50px;
margin-top: 50px;
margin-left: 50px;
background: orange;
}
</style>
</head>
<body>
<div class="pywebview-drag-region">Drag me!</div>
</body>
'''
if __name__ == '__main__':
window = webview.create_window(
'API example',
html=html,
frameless=True,
easy_drag=False,
)
webview.start()
|
<commit_before><commit_msg>Add frameless drag region example.<commit_after>
|
import webview
'''
This example demonstrates a user-provided "drag region" to move a frameless window
around, whilst maintaining normal mouse down/move events elsewhere. This roughly
replicates `-webkit-drag-region`.
'''
html = '''
<head>
<style type="text/css">
.pywebview-drag-region {
width: 50px;
height: 50px;
margin-top: 50px;
margin-left: 50px;
background: orange;
}
</style>
</head>
<body>
<div class="pywebview-drag-region">Drag me!</div>
</body>
'''
if __name__ == '__main__':
window = webview.create_window(
'API example',
html=html,
frameless=True,
easy_drag=False,
)
webview.start()
|
Add frameless drag region example.import webview
'''
This example demonstrates a user-provided "drag region" to move a frameless window
around, whilst maintaining normal mouse down/move events elsewhere. This roughly
replicates `-webkit-drag-region`.
'''
html = '''
<head>
<style type="text/css">
.pywebview-drag-region {
width: 50px;
height: 50px;
margin-top: 50px;
margin-left: 50px;
background: orange;
}
</style>
</head>
<body>
<div class="pywebview-drag-region">Drag me!</div>
</body>
'''
if __name__ == '__main__':
window = webview.create_window(
'API example',
html=html,
frameless=True,
easy_drag=False,
)
webview.start()
|
<commit_before><commit_msg>Add frameless drag region example.<commit_after>import webview
'''
This example demonstrates a user-provided "drag region" to move a frameless window
around, whilst maintaining normal mouse down/move events elsewhere. This roughly
replicates `-webkit-drag-region`.
'''
html = '''
<head>
<style type="text/css">
.pywebview-drag-region {
width: 50px;
height: 50px;
margin-top: 50px;
margin-left: 50px;
background: orange;
}
</style>
</head>
<body>
<div class="pywebview-drag-region">Drag me!</div>
</body>
'''
if __name__ == '__main__':
window = webview.create_window(
'API example',
html=html,
frameless=True,
easy_drag=False,
)
webview.start()
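For contrast, a minimal sketch of the easy_drag variant, where the whole frameless window is draggable and no drag-region element is needed; it reuses only arguments already shown above.
import webview

if __name__ == '__main__':
    webview.create_window(
        'Easy drag example',
        html='<body><h1>Drag anywhere</h1></body>',
        frameless=True,
        easy_drag=True,
    )
    webview.start()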
|
|
2a1b46740c4cf14f7db4f344431aced9bf06d1e7
|
scripts/sync_for_real.py
|
scripts/sync_for_real.py
|
#!/usr/bin/env python3
import subprocess
import sys
from time import time
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def main():
nr_fast = 3
while nr_fast > 0:
eprint('syncing... ', end='', flush=True)
start_t = time()
subprocess.Popen('/usr/bin/sync', stdout=None, stderr=None).wait()
time_length = time() - start_t
eprint('{0:0.3f}'.format(time_length))
if time_length < 0.10:
nr_fast = nr_fast - 1
else:
nr_fast = 3
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add a little program that calls sync until it is done
|
Add a little program that calls sync until it is done
|
Python
|
unlicense
|
paolobolzoni/useful-conf,paolobolzoni/useful-conf,paolobolzoni/useful-conf
|
Add a little program that calls sync until it is done
|
#!/usr/bin/env python3
import subprocess
import sys
from time import time
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def main():
nr_fast = 3
while nr_fast > 0:
eprint('syncing... ', end='', flush=True)
start_t = time()
subprocess.Popen('/usr/bin/sync', stdout=None, stderr=None).wait()
time_length = time() - start_t
eprint('{0:0.3f}'.format(time_length))
if time_length < 0.10:
nr_fast = nr_fast - 1
else:
nr_fast = 3
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a little program that calls sync until it is done<commit_after>
|
#!/usr/bin/env python3
import subprocess
import sys
from time import time
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def main():
nr_fast = 3
while nr_fast > 0:
eprint('syncing... ', end='', flush=True)
start_t = time()
subprocess.Popen('/usr/bin/sync', stdout=None, stderr=None).wait()
time_length = time() - start_t
eprint('{0:0.3f}'.format(time_length))
if time_length < 0.10:
nr_fast = nr_fast - 1
else:
nr_fast = 3
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add a little program that calls sync until it is done#!/usr/bin/env python3
import subprocess
import sys
from time import time
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def main():
nr_fast = 3
while nr_fast > 0:
eprint('syncing... ', end='', flush=True)
start_t = time()
subprocess.Popen('/usr/bin/sync', stdout=None, stderr=None).wait()
time_length = time() - start_t
eprint('{0:0.3f}'.format(time_length))
if time_length < 0.10:
nr_fast = nr_fast - 1
else:
nr_fast = 3
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a little program that calls sync until it is done<commit_after>#!/usr/bin/env python3
import subprocess
import sys
from time import time
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def main():
nr_fast = 3
while nr_fast > 0:
eprint('syncing... ', end='', flush=True)
start_t = time()
subprocess.Popen('/usr/bin/sync', stdout=None, stderr=None).wait()
time_length = time() - start_t
eprint('{0:0.3f}'.format(time_length))
if time_length < 0.10:
nr_fast = nr_fast - 1
else:
nr_fast = 3
return 0
if __name__ == '__main__':
sys.exit(main())
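An equivalent sketch of the timing step using subprocess.run (Python 3.5+), shown only as an alternative to the Popen/wait pattern above.
import subprocess
from time import time

start_t = time()
subprocess.run(['/usr/bin/sync'], check=False)  # same binary path as assumed above
print('sync took {0:0.3f}s'.format(time() - start_t))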
|
|
91bb7506bd20ed22b8787e7a8b9975cc07e97175
|
owners_client.py
|
owners_client.py
|
# Copyright (c) 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class OwnersClient(object):
"""Interact with OWNERS files in a repository.
This class allows you to interact with OWNERS files in a repository both the
Gerrit Code-Owners plugin REST API, and the owners database implemented by
Depot Tools in owners.py:
- List all the owners for a change.
- Check if a change has been approved.
- Check if the OWNERS configuration in a change is valid.
All code should use this class to interact with OWNERS files instead of the
owners database in owners.py
"""
def __init__(self, host):
self._host = host
def ListOwnersForFile(self, project, branch, path):
"""List all owners for a file."""
raise Exception('Not implemented')
def IsChangeApproved(self, change_number):
"""Check if the latest patch set for a change has been approved."""
raise Exception('Not implemented')
def IsOwnerConfigurationValid(self, change_number, patch):
"""Check if the owners configuration in a change is valid."""
raise Exception('Not implemented')
|
Add owners client to depot_tools.
|
[depot_tools][owners] Add owners client to depot_tools.
Add an owners API that will be used by Depot Tools to
interact with OWNERS files.
The API will be implemented using both owners.py and the
Gerrit Code-Owners plugin REST API. All Depot Tools code
will be modified to use this API.
Change-Id: I7cf059a0895dbae105a2f0b26568fd7b47068f43
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/tools/depot_tools/+/2517985
Reviewed-by: Anthony Polito <05bb588aa9020353543fee7ac4e6e0a3d0d6f6ed@google.com>
Commit-Queue: Anthony Polito <05bb588aa9020353543fee7ac4e6e0a3d0d6f6ed@google.com>
|
Python
|
bsd-3-clause
|
CoherentLabs/depot_tools,CoherentLabs/depot_tools
|
[depot_tools][owners] Add owners client to depot_tools.
Add an owners API that will be used by Depot Tools to
interact with OWNERS files.
The API will be implemented using both owners.py and the
Gerrit Code-Owners plugin REST API. All Depot Tools code
will be modified to use this API.
Change-Id: I7cf059a0895dbae105a2f0b26568fd7b47068f43
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/tools/depot_tools/+/2517985
Reviewed-by: Anthony Polito <05bb588aa9020353543fee7ac4e6e0a3d0d6f6ed@google.com>
Commit-Queue: Anthony Polito <05bb588aa9020353543fee7ac4e6e0a3d0d6f6ed@google.com>
|
# Copyright (c) 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class OwnersClient(object):
"""Interact with OWNERS files in a repository.
This class allows you to interact with OWNERS files in a repository both the
Gerrit Code-Owners plugin REST API, and the owners database implemented by
Depot Tools in owners.py:
- List all the owners for a change.
- Check if a change has been approved.
- Check if the OWNERS configuration in a change is valid.
All code should use this class to interact with OWNERS files instead of the
owners database in owners.py
"""
def __init__(self, host):
self._host = host
def ListOwnersForFile(self, project, branch, path):
"""List all owners for a file."""
raise Exception('Not implemented')
def IsChangeApproved(self, change_number):
"""Check if the latest patch set for a change has been approved."""
raise Exception('Not implemented')
def IsOwnerConfigurationValid(self, change_number, patch):
"""Check if the owners configuration in a change is valid."""
raise Exception('Not implemented')
|
<commit_before><commit_msg>[depot_tools][owners] Add owners client to depot_tools.
Add an owners API that will be used by Depot Tools to
interact with OWNERS files.
The API will be implemented using both owners.py and the
Gerrit Code-Owners plugin REST API. All Depot Tools code
will be modified to use this API.
Change-Id: I7cf059a0895dbae105a2f0b26568fd7b47068f43
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/tools/depot_tools/+/2517985
Reviewed-by: Anthony Polito <05bb588aa9020353543fee7ac4e6e0a3d0d6f6ed@google.com>
Commit-Queue: Anthony Polito <05bb588aa9020353543fee7ac4e6e0a3d0d6f6ed@google.com><commit_after>
|
# Copyright (c) 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class OwnersClient(object):
"""Interact with OWNERS files in a repository.
This class allows you to interact with OWNERS files in a repository both the
Gerrit Code-Owners plugin REST API, and the owners database implemented by
Depot Tools in owners.py:
- List all the owners for a change.
- Check if a change has been approved.
- Check if the OWNERS configuration in a change is valid.
All code should use this class to interact with OWNERS files instead of the
owners database in owners.py
"""
def __init__(self, host):
self._host = host
def ListOwnersForFile(self, project, branch, path):
"""List all owners for a file."""
raise Exception('Not implemented')
def IsChangeApproved(self, change_number):
"""Check if the latest patch set for a change has been approved."""
raise Exception('Not implemented')
def IsOwnerConfigurationValid(self, change_number, patch):
"""Check if the owners configuration in a change is valid."""
raise Exception('Not implemented')
|
[depot_tools][owners] Add owners client to depot_tools.
Add an owners API that will be used by Depot Tools to
interact with OWNERS files.
The API will be implemented using both owners.py and the
Gerrit Code-Owners plugin REST API. All Depot Tools code
will be modified to use this API.
Change-Id: I7cf059a0895dbae105a2f0b26568fd7b47068f43
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/tools/depot_tools/+/2517985
Reviewed-by: Anthony Polito <05bb588aa9020353543fee7ac4e6e0a3d0d6f6ed@google.com>
Commit-Queue: Anthony Polito <05bb588aa9020353543fee7ac4e6e0a3d0d6f6ed@google.com># Copyright (c) 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class OwnersClient(object):
"""Interact with OWNERS files in a repository.
This class allows you to interact with OWNERS files in a repository both the
Gerrit Code-Owners plugin REST API, and the owners database implemented by
Depot Tools in owners.py:
- List all the owners for a change.
- Check if a change has been approved.
- Check if the OWNERS configuration in a change is valid.
All code should use this class to interact with OWNERS files instead of the
owners database in owners.py
"""
def __init__(self, host):
self._host = host
def ListOwnersForFile(self, project, branch, path):
"""List all owners for a file."""
raise Exception('Not implemented')
def IsChangeApproved(self, change_number):
"""Check if the latest patch set for a change has been approved."""
raise Exception('Not implemented')
def IsOwnerConfigurationValid(self, change_number, patch):
"""Check if the owners configuration in a change is valid."""
raise Exception('Not implemented')
|
<commit_before><commit_msg>[depot_tools][owners] Add owners client to depot_tools.
Add an owners API that will be used by Depot Tools to
interact with OWNERS files.
The API will be implemented using both owners.py and the
Gerrit Code-Owners plugin REST API. All Depot Tools code
will be modified to use this API.
Change-Id: I7cf059a0895dbae105a2f0b26568fd7b47068f43
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/tools/depot_tools/+/2517985
Reviewed-by: Anthony Polito <05bb588aa9020353543fee7ac4e6e0a3d0d6f6ed@google.com>
Commit-Queue: Anthony Polito <05bb588aa9020353543fee7ac4e6e0a3d0d6f6ed@google.com><commit_after># Copyright (c) 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class OwnersClient(object):
"""Interact with OWNERS files in a repository.
This class allows you to interact with OWNERS files in a repository both the
Gerrit Code-Owners plugin REST API, and the owners database implemented by
Depot Tools in owners.py:
- List all the owners for a change.
- Check if a change has been approved.
- Check if the OWNERS configuration in a change is valid.
All code should use this class to interact with OWNERS files instead of the
owners database in owners.py
"""
def __init__(self, host):
self._host = host
def ListOwnersForFile(self, project, branch, path):
"""List all owners for a file."""
raise Exception('Not implemented')
def IsChangeApproved(self, change_number):
"""Check if the latest patch set for a change has been approved."""
raise Exception('Not implemented')
def IsOwnerConfigurationValid(self, change_number, patch):
"""Check if the owners configuration in a change is valid."""
raise Exception('Not implemented')
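A hypothetical sketch of a concrete subclass, purely to illustrate the intended shape of the interface; the real depot_tools implementations back onto Gerrit and owners.py and look different.
class StaticOwnersClient(OwnersClient):
    """Toy implementation that answers from an in-memory mapping (illustrative only)."""
    def __init__(self, host, owners_by_path):
        super(StaticOwnersClient, self).__init__(host)
        self._owners_by_path = owners_by_path

    def ListOwnersForFile(self, project, branch, path):
        return self._owners_by_path.get(path, [])

client = StaticOwnersClient('example-review.googlesource.com',   # placeholder host
                            {'docs/OWNERS': ['alice@example.com']})
print(client.ListOwnersForFile('src', 'main', 'docs/OWNERS'))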
|
|
1be4e6f97b3d062c4fa07f70b05305bf32593fd4
|
dotbriefs/tests/test_smudge.py
|
dotbriefs/tests/test_smudge.py
|
import unittest
from dotbriefs.smudge import SmudgeTemplate
class TestCleanSecret(unittest.TestCase):
def setUp(self):
self.secrets = {}
self.secrets['password'] = 's3cr3t'
self.secrets['question'] = 'h1dd3n 4g3nd4'
self.template = []
self.template.append(SmudgeTemplate('name', self.secrets))
def test_nosecret_sub(self):
self.assertEqual(self.template[0].sub('password = hi # comment'),
'password = hi # comment')
def test_nokey_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: $ # comment'),
'password = $DotBriefs: $ # comment')
def test_nomatch_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: notfound$ # comment'),
'password = $DotBriefs: notfound$ # comment')
def test_single_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: password$ # comment'),
'password = s3cr3t # comment')
def test_double_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: password$; security question = $DotBriefs: question$ # comment'),
'password = s3cr3t; security question = h1dd3n 4g3nd4 # comment')
if __name__ == '__main__':
unittest.main()
|
Add test cases for smudge
|
Add test cases for smudge
|
Python
|
bsd-3-clause
|
oohlaf/dotsecrets
|
Add test cases for smudge
|
import unittest
from dotbriefs.smudge import SmudgeTemplate
class TestCleanSecret(unittest.TestCase):
def setUp(self):
self.secrets = {}
self.secrets['password'] = 's3cr3t'
self.secrets['question'] = 'h1dd3n 4g3nd4'
self.template = []
self.template.append(SmudgeTemplate('name', self.secrets))
def test_nosecret_sub(self):
self.assertEqual(self.template[0].sub('password = hi # comment'),
'password = hi # comment')
def test_nokey_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: $ # comment'),
'password = $DotBriefs: $ # comment')
def test_nomatch_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: notfound$ # comment'),
'password = $DotBriefs: notfound$ # comment')
def test_single_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: password$ # comment'),
'password = s3cr3t # comment')
def test_double_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: password$; security question = $DotBriefs: question$ # comment'),
'password = s3cr3t; security question = h1dd3n 4g3nd4 # comment')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test cases for smudge<commit_after>
|
import unittest
from dotbriefs.smudge import SmudgeTemplate
class TestCleanSecret(unittest.TestCase):
def setUp(self):
self.secrets = {}
self.secrets['password'] = 's3cr3t'
self.secrets['question'] = 'h1dd3n 4g3nd4'
self.template = []
self.template.append(SmudgeTemplate('name', self.secrets))
def test_nosecret_sub(self):
self.assertEqual(self.template[0].sub('password = hi # comment'),
'password = hi # comment')
def test_nokey_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: $ # comment'),
'password = $DotBriefs: $ # comment')
def test_nomatch_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: notfound$ # comment'),
'password = $DotBriefs: notfound$ # comment')
def test_single_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: password$ # comment'),
'password = s3cr3t # comment')
def test_double_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: password$; security question = $DotBriefs: question$ # comment'),
'password = s3cr3t; security question = h1dd3n 4g3nd4 # comment')
if __name__ == '__main__':
unittest.main()
|
Add test cases for smudgeimport unittest
from dotbriefs.smudge import SmudgeTemplate
class TestCleanSecret(unittest.TestCase):
def setUp(self):
self.secrets = {}
self.secrets['password'] = 's3cr3t'
self.secrets['question'] = 'h1dd3n 4g3nd4'
self.template = []
self.template.append(SmudgeTemplate('name', self.secrets))
def test_nosecret_sub(self):
self.assertEqual(self.template[0].sub('password = hi # comment'),
'password = hi # comment')
def test_nokey_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: $ # comment'),
'password = $DotBriefs: $ # comment')
def test_nomatch_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: notfound$ # comment'),
'password = $DotBriefs: notfound$ # comment')
def test_single_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: password$ # comment'),
'password = s3cr3t # comment')
def test_double_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: password$; security question = $DotBriefs: question$ # comment'),
'password = s3cr3t; security question = h1dd3n 4g3nd4 # comment')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test cases for smudge<commit_after>import unittest
from dotbriefs.smudge import SmudgeTemplate
class TestCleanSecret(unittest.TestCase):
def setUp(self):
self.secrets = {}
self.secrets['password'] = 's3cr3t'
self.secrets['question'] = 'h1dd3n 4g3nd4'
self.template = []
self.template.append(SmudgeTemplate('name', self.secrets))
def test_nosecret_sub(self):
self.assertEqual(self.template[0].sub('password = hi # comment'),
'password = hi # comment')
def test_nokey_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: $ # comment'),
'password = $DotBriefs: $ # comment')
def test_nomatch_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: notfound$ # comment'),
'password = $DotBriefs: notfound$ # comment')
def test_single_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: password$ # comment'),
'password = s3cr3t # comment')
def test_double_sub(self):
self.assertEqual(self.template[0].sub('password = $DotBriefs: password$; security question = $DotBriefs: question$ # comment'),
'password = s3cr3t; security question = h1dd3n 4g3nd4 # comment')
if __name__ == '__main__':
unittest.main()
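A standalone sketch of the substitution the tests pin down, assuming dotbriefs is importable; the secret value is illustrative.
from dotbriefs.smudge import SmudgeTemplate

template = SmudgeTemplate('name', {'password': 's3cr3t'})
print(template.sub('password = $DotBriefs: password$ # comment'))
# expected output: password = s3cr3t # comment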
|
|
254239102955bb8916aab98530251b5cdd79ce50
|
cypher/siggen.py
|
cypher/siggen.py
|
#!/usr/bin/env python
import argparse
import subprocess
import os
import shutil
import sys
from util import write_signature
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--language",
help="Source code language.",
required=True
)
TEMP_DIR = os.path.join(os.getcwd(), "cypher", "temp")
if os.path.exists(TEMP_DIR):
shutil.rmtree(TEMP_DIR)
lang = vars(parser.parse_args())["language"]
if lang == "Python":
repo = "https://github.com/django/django.git"
ext = [".py"]
elif lang == "Ruby":
repo = "https://github.com/Homebrew/legacy-homebrew.git"
ext = [".rb"]
elif lang == "C":
repo = "https://github.com/git/git.git"
ext = [".c", ".h"]
elif lang == "C++":
repo = "https://github.com/apple/swift.git"
ext = [".cpp", ".cc", ".h"]
elif lang == "R":
repo = "https://github.com/rstudio/shiny.git"
ext = [".R", ".r"]
else:
print("{} not found.".format(lang))
sys.exit(0)
os.makedirs(TEMP_DIR)
pro = subprocess.Popen(
["git", "clone", repo],
cwd=TEMP_DIR,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(out, error) = pro.communicate()
src_dir = os.path.join(TEMP_DIR, repo.split("/")[-1].split(".")[0])
write_signature(src_dir, lang, ext)
shutil.rmtree(TEMP_DIR)
|
Add script to write base signatures
|
Add script to write base signatures
|
Python
|
mit
|
jdkato/codetype,jdkato/codetype
|
Add script to write base signatures
|
#!/usr/bin/env python
import argparse
import subprocess
import os
import shutil
import sys
from util import write_signature
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--language",
help="Source code language.",
required=True
)
TEMP_DIR = os.path.join(os.getcwd(), "cypher", "temp")
if os.path.exists(TEMP_DIR):
shutil.rmtree(TEMP_DIR)
lang = vars(parser.parse_args())["language"]
if lang == "Python":
repo = "https://github.com/django/django.git"
ext = [".py"]
elif lang == "Ruby":
repo = "https://github.com/Homebrew/legacy-homebrew.git"
ext = [".rb"]
elif lang == "C":
repo = "https://github.com/git/git.git"
ext = [".c", ".h"]
elif lang == "C++":
repo = "https://github.com/apple/swift.git"
ext = [".cpp", ".cc", ".h"]
elif lang == "R":
repo = "https://github.com/rstudio/shiny.git"
ext = [".R", ".r"]
else:
print("{} not found.".format(lang))
sys.exit(0)
os.makedirs(TEMP_DIR)
pro = subprocess.Popen(
["git", "clone", repo],
cwd=TEMP_DIR,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(out, error) = pro.communicate()
src_dir = os.path.join(TEMP_DIR, repo.split("/")[-1].split(".")[0])
write_signature(src_dir, lang, ext)
shutil.rmtree(TEMP_DIR)
|
<commit_before><commit_msg>Add script to write base signatures<commit_after>
|
#!/usr/bin/env python
import argparse
import subprocess
import os
import shutil
import sys
from util import write_signature
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--language",
help="Source code language.",
required=True
)
TEMP_DIR = os.path.join(os.getcwd(), "cypher", "temp")
if os.path.exists(TEMP_DIR):
shutil.rmtree(TEMP_DIR)
lang = vars(parser.parse_args())["language"]
if lang == "Python":
repo = "https://github.com/django/django.git"
ext = [".py"]
elif lang == "Ruby":
repo = "https://github.com/Homebrew/legacy-homebrew.git"
ext = [".rb"]
elif lang == "C":
repo = "https://github.com/git/git.git"
ext = [".c", ".h"]
elif lang == "C++":
repo = "https://github.com/apple/swift.git"
ext = [".cpp", ".cc", ".h"]
elif lang == "R":
repo = "https://github.com/rstudio/shiny.git"
ext = [".R", ".r"]
else:
print("{} not found.".format(lang))
sys.exit(0)
os.makedirs(TEMP_DIR)
pro = subprocess.Popen(
["git", "clone", repo],
cwd=TEMP_DIR,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(out, error) = pro.communicate()
src_dir = os.path.join(TEMP_DIR, repo.split("/")[-1].split(".")[0])
write_signature(src_dir, lang, ext)
shutil.rmtree(TEMP_DIR)
|
Add script to write base signatures#!/usr/bin/env python
import argparse
import subprocess
import os
import shutil
import sys
from util import write_signature
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--language",
help="Source code language.",
required=True
)
TEMP_DIR = os.path.join(os.getcwd(), "cypher", "temp")
if os.path.exists(TEMP_DIR):
shutil.rmtree(TEMP_DIR)
lang = vars(parser.parse_args())["language"]
if lang == "Python":
repo = "https://github.com/django/django.git"
ext = [".py"]
elif lang == "Ruby":
repo = "https://github.com/Homebrew/legacy-homebrew.git"
ext = [".rb"]
elif lang == "C":
repo = "https://github.com/git/git.git"
ext = [".c", ".h"]
elif lang == "C++":
repo = "https://github.com/apple/swift.git"
ext = [".cpp", ".cc", ".h"]
elif lang == "R":
repo = "https://github.com/rstudio/shiny.git"
ext = [".R", ".r"]
else:
print("{} not found.".format(lang))
sys.exit(0)
os.makedirs(TEMP_DIR)
pro = subprocess.Popen(
["git", "clone", repo],
cwd=TEMP_DIR,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(out, error) = pro.communicate()
src_dir = os.path.join(TEMP_DIR, repo.split("/")[-1].split(".")[0])
write_signature(src_dir, lang, ext)
shutil.rmtree(TEMP_DIR)
|
<commit_before><commit_msg>Add script to write base signatures<commit_after>#!/usr/bin/env python
import argparse
import subprocess
import os
import shutil
import sys
from util import write_signature
parser = argparse.ArgumentParser()
parser.add_argument(
"-l",
"--language",
help="Source code language.",
required=True
)
TEMP_DIR = os.path.join(os.getcwd(), "cypher", "temp")
if os.path.exists(TEMP_DIR):
shutil.rmtree(TEMP_DIR)
lang = vars(parser.parse_args())["language"]
if lang == "Python":
repo = "https://github.com/django/django.git"
ext = [".py"]
elif lang == "Ruby":
repo = "https://github.com/Homebrew/legacy-homebrew.git"
ext = [".rb"]
elif lang == "C":
repo = "https://github.com/git/git.git"
ext = [".c", ".h"]
elif lang == "C++":
repo = "https://github.com/apple/swift.git"
ext = [".cpp", ".cc", ".h"]
elif lang == "R":
repo = "https://github.com/rstudio/shiny.git"
ext = [".R", ".r"]
else:
print("{} not found.".format(lang))
sys.exit(0)
os.makedirs(TEMP_DIR)
pro = subprocess.Popen(
["git", "clone", repo],
cwd=TEMP_DIR,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(out, error) = pro.communicate()
src_dir = os.path.join(TEMP_DIR, repo.split("/")[-1].split(".")[0])
write_signature(src_dir, lang, ext)
shutil.rmtree(TEMP_DIR)
|
|
19b13f0fb9b86ec99025bd1baf2c4d5fe757f809
|
tests/test_tests.py
|
tests/test_tests.py
|
import pytest
def test_BeautifulSoup_methods_are_overridden(
client_request,
mock_get_service_and_organisation_counts,
):
client_request.logout()
page = client_request.get("main.index", _test_page_title=False)
with pytest.raises(AttributeError) as exception:
page.find("h1")
assert str(exception.value) == "Don’t use BeautifulSoup.find – try BeautifulSoup.select_one instead"
with pytest.raises(AttributeError) as exception:
page.find_all("h1")
assert str(exception.value) == "Don’t use BeautifulSoup.find_all – try BeautifulSoup.select instead"
|
Add a test to make sure exception is raised
|
Add a test to make sure exception is raised
None of our code should be raising this exception, so it’s possible it
could stop working without us knowing about it.
This commit adds a test to alert us if the override ever does silently
stop working (for example because of a change in `BeautifulSoup`)
|
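The override that this message refers to lives elsewhere in the test suite and is not part of this record, so the snippet below is only a guess at how it could be wired up: the monkeypatching approach, and everything outside the two error strings (which are copied verbatim from the assertions above), are assumptions rather than the project's actual implementation.
# Hypothetical sketch of a BeautifulSoup override like the one the test exercises.
from bs4 import BeautifulSoup
def _no_find(self, *args, **kwargs):
    raise AttributeError("Don’t use BeautifulSoup.find – try BeautifulSoup.select_one instead")
def _no_find_all(self, *args, **kwargs):
    raise AttributeError("Don’t use BeautifulSoup.find_all – try BeautifulSoup.select instead")
# Installing the overrides module-wide (for example from a conftest.py) means any test that
# falls back to find/find_all fails immediately with a pointer to the CSS-selector alternatives.
BeautifulSoup.find = _no_find
BeautifulSoup.find_all = _no_find_all
page = BeautifulSoup("<h1>Hello</h1>", "html.parser")
print(page.select_one("h1").text)  # select_one still works
# page.find("h1") would now raise the AttributeError asserted in the test above.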
Python
|
mit
|
alphagov/notifications-admin,alphagov/notifications-admin,alphagov/notifications-admin,alphagov/notifications-admin
|
Add a test to make sure exception is raised
None of our code should be raising this exception, so it’s possible it
could stop working without us knowing about it.
This commit adds a test to alert us if the override ever does silently
stop working (for example because of a change in `BeautifulSoup`)
|
import pytest
def test_BeautifulSoup_methods_are_overridden(
client_request,
mock_get_service_and_organisation_counts,
):
client_request.logout()
page = client_request.get("main.index", _test_page_title=False)
with pytest.raises(AttributeError) as exception:
page.find("h1")
assert str(exception.value) == "Don’t use BeautifulSoup.find – try BeautifulSoup.select_one instead"
with pytest.raises(AttributeError) as exception:
page.find_all("h1")
assert str(exception.value) == "Don’t use BeautifulSoup.find_all – try BeautifulSoup.select instead"
|
<commit_before><commit_msg>Add a test to make sure exception is raised
None of our code should be raising this exception, so it’s possible it
could stop working without us knowing about it.
This commit adds a test to alert us if the override ever does silently
stop working (for example because of a change in `BeautifulSoup`)<commit_after>
|
import pytest
def test_BeautifulSoup_methods_are_overridden(
client_request,
mock_get_service_and_organisation_counts,
):
client_request.logout()
page = client_request.get("main.index", _test_page_title=False)
with pytest.raises(AttributeError) as exception:
page.find("h1")
assert str(exception.value) == "Don’t use BeautifulSoup.find – try BeautifulSoup.select_one instead"
with pytest.raises(AttributeError) as exception:
page.find_all("h1")
assert str(exception.value) == "Don’t use BeautifulSoup.find_all – try BeautifulSoup.select instead"
|
Add a test to make sure exception is raised
None of our code should be raising this exception, so it’s possible it
could stop working without us knowing about it.
This commit adds a test to alert us if the override ever does silently
stop working (for example because of a change in `BeautifulSoup`)import pytest
def test_BeautifulSoup_methods_are_overridden(
client_request,
mock_get_service_and_organisation_counts,
):
client_request.logout()
page = client_request.get("main.index", _test_page_title=False)
with pytest.raises(AttributeError) as exception:
page.find("h1")
assert str(exception.value) == "Don’t use BeautifulSoup.find – try BeautifulSoup.select_one instead"
with pytest.raises(AttributeError) as exception:
page.find_all("h1")
assert str(exception.value) == "Don’t use BeautifulSoup.find_all – try BeautifulSoup.select instead"
|
<commit_before><commit_msg>Add a test to make sure exception is raised
None of our code should be raising this exception, so it’s possible it
could stop working without us knowing about it.
This commit adds a test to alert us if the override ever does silently
stop working (for example because of a change in `BeautifulSoup`)<commit_after>import pytest
def test_BeautifulSoup_methods_are_overridden(
client_request,
mock_get_service_and_organisation_counts,
):
client_request.logout()
page = client_request.get("main.index", _test_page_title=False)
with pytest.raises(AttributeError) as exception:
page.find("h1")
assert str(exception.value) == "Don’t use BeautifulSoup.find – try BeautifulSoup.select_one instead"
with pytest.raises(AttributeError) as exception:
page.find_all("h1")
assert str(exception.value) == "Don’t use BeautifulSoup.find_all – try BeautifulSoup.select instead"
|