Dataset columns:
- commit: stringlengths 40–40
- old_file: stringlengths 4–118
- new_file: stringlengths 4–118
- old_contents: stringlengths 0–2.94k
- new_contents: stringlengths 1–4.43k
- subject: stringlengths 15–444
- message: stringlengths 16–3.45k
- lang: stringclasses, 1 value
- license: stringclasses, 13 values
- repos: stringlengths 5–43.2k
- prompt: stringlengths 17–4.58k
- response: stringlengths 1–4.43k
- prompt_tagged: stringlengths 58–4.62k
- response_tagged: stringlengths 1–4.43k
- text: stringlengths 132–7.29k
- text_tagged: stringlengths 173–7.33k
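The rows below list these fields in order, one record per commit, with fields separated by lines containing `|`. As a minimal sketch of how such a dataset could be loaded and inspected, assuming it is published through the Hugging Face `datasets` library (the dataset identifier below is a placeholder, not the real one):

```python
from datasets import load_dataset  # Hugging Face `datasets` library

# Placeholder identifier; substitute the actual dataset path.
ds = load_dataset("username/python-commit-pairs", split="train")

row = ds[0]
# Core commit metadata
print(row["commit"], row["old_file"], "->", row["new_file"])
print(row["subject"])
# In the records shown here, `text` concatenates the commit message with the
# new file contents, and `text_tagged` wraps the same content in
# <commit_before>/<commit_msg>/<commit_after> markers.
print(row["text"][:200])
```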
56d0f20de569deb359e172bee7bf245a398c3430
|
setting_seq_len.py
|
setting_seq_len.py
|
import tensorflow as tf
import numpy as np
tf.set_random_seed(765)
np.random.seed(765)
tf.reset_default_graph()
n_inputs = 3
n_neurons = 5
n_steps = 2
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
seq_length = tf.placeholder(tf.int32, [None])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, sequence_length=seq_length,
dtype=tf.float32)
init = tf.global_variables_initializer()
X_batch = np.array([
[[0, 1, 2], [9, 8, 7]], # instance 1
[[3, 4, 5], [0, 0, 0]], # instance 2
[[6, 7, 8], [6, 5, 4]], # instance 3
[[9, 0, 1], [3, 2, 1]], # instance 4
])
seq_length_batch = np.array([2,1,2,2])
with tf.Session() as sess:
init.run()
outputs_val, states_val = sess.run(
[outputs, states], feed_dict={X: X_batch, seq_length: seq_length_batch}
)
print(outputs_val)
print(states_val)
|
Add code for setting sequence length for RNN
|
Add code for setting sequence length for RNN
In some cases, sequence length may differ (sentences, sound)
This small example shows how to manually deal seq
of different lengths.
|
Python
|
mit
|
KT12/hands_on_machine_learning
|
Add code for setting sequence length for RNN
In some cases, sequence length may differ (sentences, sound)
This small example shows how to manually deal seq
of different lengths.
|
import tensorflow as tf
import numpy as np
tf.set_random_seed(765)
np.random.seed(765)
tf.reset_default_graph()
n_inputs = 3
n_neurons = 5
n_steps = 2
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
seq_length = tf.placeholder(tf.int32, [None])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, sequence_length=seq_length,
dtype=tf.float32)
init = tf.global_variables_initializer()
X_batch = np.array([
[[0, 1, 2], [9, 8, 7]], # instance 1
[[3, 4, 5], [0, 0, 0]], # instance 2
[[6, 7, 8], [6, 5, 4]], # instance 3
[[9, 0, 1], [3, 2, 1]], # instance 4
])
seq_length_batch = np.array([2,1,2,2])
with tf.Session() as sess:
init.run()
outputs_val, states_val = sess.run(
[outputs, states], feed_dict={X: X_batch, seq_length: seq_length_batch}
)
print(outputs_val)
print(states_val)
|
<commit_before><commit_msg>Add code for setting sequence length for RNN
In some cases, sequence length may differ (sentences, sound)
This small example shows how to manually deal seq
of different lengths.<commit_after>
|
import tensorflow as tf
import numpy as np
tf.set_random_seed(765)
np.random.seed(765)
tf.reset_default_graph()
n_inputs = 3
n_neurons = 5
n_steps = 2
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
seq_length = tf.placeholder(tf.int32, [None])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, sequence_length=seq_length,
dtype=tf.float32)
init = tf.global_variables_initializer()
X_batch = np.array([
[[0, 1, 2], [9, 8, 7]], # instance 1
[[3, 4, 5], [0, 0, 0]], # instance 2
[[6, 7, 8], [6, 5, 4]], # instance 3
[[9, 0, 1], [3, 2, 1]], # instance 4
])
seq_length_batch = np.array([2,1,2,2])
with tf.Session() as sess:
init.run()
outputs_val, states_val = sess.run(
[outputs, states], feed_dict={X: X_batch, seq_length: seq_length_batch}
)
print(outputs_val)
print(states_val)
|
Add code for setting sequence length for RNN
In some cases, sequence length may differ (sentences, sound)
This small example shows how to manually deal seq
of different lengths.import tensorflow as tf
import numpy as np
tf.set_random_seed(765)
np.random.seed(765)
tf.reset_default_graph()
n_inputs = 3
n_neurons = 5
n_steps = 2
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
seq_length = tf.placeholder(tf.int32, [None])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, sequence_length=seq_length,
dtype=tf.float32)
init = tf.global_variables_initializer()
X_batch = np.array([
[[0, 1, 2], [9, 8, 7]], # instance 1
[[3, 4, 5], [0, 0, 0]], # instance 2
[[6, 7, 8], [6, 5, 4]], # instance 3
[[9, 0, 1], [3, 2, 1]], # instance 4
])
seq_length_batch = np.array([2,1,2,2])
with tf.Session() as sess:
init.run()
outputs_val, states_val = sess.run(
[outputs, states], feed_dict={X: X_batch, seq_length: seq_length_batch}
)
print(outputs_val)
print(states_val)
|
<commit_before><commit_msg>Add code for setting sequence length for RNN
In some cases, sequence length may differ (sentences, sound)
This small example shows how to manually deal seq
of different lengths.<commit_after>import tensorflow as tf
import numpy as np
tf.set_random_seed(765)
np.random.seed(765)
tf.reset_default_graph()
n_inputs = 3
n_neurons = 5
n_steps = 2
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
seq_length = tf.placeholder(tf.int32, [None])
basic_cell = tf.contrib.rnn.BasicRNNCell(num_units=n_neurons)
outputs, states = tf.nn.dynamic_rnn(basic_cell, X, sequence_length=seq_length,
dtype=tf.float32)
init = tf.global_variables_initializer()
X_batch = np.array([
[[0, 1, 2], [9, 8, 7]], # instance 1
[[3, 4, 5], [0, 0, 0]], # instance 2
[[6, 7, 8], [6, 5, 4]], # instance 3
[[9, 0, 1], [3, 2, 1]], # instance 4
])
seq_length_batch = np.array([2,1,2,2])
with tf.Session() as sess:
init.run()
outputs_val, states_val = sess.run(
[outputs, states], feed_dict={X: X_batch, seq_length: seq_length_batch}
)
print(outputs_val)
print(states_val)
|
|
033bcb02dbd0e4c7c89ddd350c2fa0ca247f59ee
|
tests/disabled_test_animals.py
|
tests/disabled_test_animals.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
from cgpm.crosscat.engine import Engine
from cgpm.utils import general as gu
from cgpm.utils import plots as pu
from cgpm.utils import test as tu
from cgpm.utils import render_utils as ru
animals = pd.read_csv('resources/animals/animals.csv', index_col=0)
animal_values = animals.values
animal_names = animals.index.values
animal_features = animals.columns.values
def launch_analysis():
engine = Engine(
animals.values.astype(float),
num_states=64,
cctypes=['categorical']*len(animals.values[0]),
distargs=[{'k':2}]*len(animals.values[0]),
rng=gu.gen_rng(7))
engine.transition(N=900)
with open('resources/animals/animals.engine', 'w') as f:
engine.to_pickle(f)
engine = Engine.from_pickle(open('resources/animals/animals.engine','r'))
D = engine.dependence_probability_pairwise()
pu.plot_clustermap(D)
def render_states_to_disk(filepath, prefix):
engine = Engine.from_pickle(filepath)
for i in range(engine.num_states()):
print '\r%d' % (i,)
savefile = '%s-%d' % (prefix, i)
state = engine.get_state(i)
ru.viz_state(
state, row_names=animal_names, col_names=animal_features,
savefile=savefile)
# render_states_to_disk(
# 'resources/animals/animals-normal.engine',
# 'resources/animals/normal/cc-normal')
# render_states_to_disk(
# 'resources/animals/animals-normal-lovecat.engine',
# 'resources/animals/normal/lv-normal')
|
Add the animals testing harness.
|
Add the animals testing harness.
|
Python
|
apache-2.0
|
probcomp/cgpm,probcomp/cgpm
|
Add the animals testing harness.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
from cgpm.crosscat.engine import Engine
from cgpm.utils import general as gu
from cgpm.utils import plots as pu
from cgpm.utils import test as tu
from cgpm.utils import render_utils as ru
animals = pd.read_csv('resources/animals/animals.csv', index_col=0)
animal_values = animals.values
animal_names = animals.index.values
animal_features = animals.columns.values
def launch_analysis():
engine = Engine(
animals.values.astype(float),
num_states=64,
cctypes=['categorical']*len(animals.values[0]),
distargs=[{'k':2}]*len(animals.values[0]),
rng=gu.gen_rng(7))
engine.transition(N=900)
with open('resources/animals/animals.engine', 'w') as f:
engine.to_pickle(f)
engine = Engine.from_pickle(open('resources/animals/animals.engine','r'))
D = engine.dependence_probability_pairwise()
pu.plot_clustermap(D)
def render_states_to_disk(filepath, prefix):
engine = Engine.from_pickle(filepath)
for i in range(engine.num_states()):
print '\r%d' % (i,)
savefile = '%s-%d' % (prefix, i)
state = engine.get_state(i)
ru.viz_state(
state, row_names=animal_names, col_names=animal_features,
savefile=savefile)
# render_states_to_disk(
# 'resources/animals/animals-normal.engine',
# 'resources/animals/normal/cc-normal')
# render_states_to_disk(
# 'resources/animals/animals-normal-lovecat.engine',
# 'resources/animals/normal/lv-normal')
|
<commit_before><commit_msg>Add the animals testing harness.<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
from cgpm.crosscat.engine import Engine
from cgpm.utils import general as gu
from cgpm.utils import plots as pu
from cgpm.utils import test as tu
from cgpm.utils import render_utils as ru
animals = pd.read_csv('resources/animals/animals.csv', index_col=0)
animal_values = animals.values
animal_names = animals.index.values
animal_features = animals.columns.values
def launch_analysis():
engine = Engine(
animals.values.astype(float),
num_states=64,
cctypes=['categorical']*len(animals.values[0]),
distargs=[{'k':2}]*len(animals.values[0]),
rng=gu.gen_rng(7))
engine.transition(N=900)
with open('resources/animals/animals.engine', 'w') as f:
engine.to_pickle(f)
engine = Engine.from_pickle(open('resources/animals/animals.engine','r'))
D = engine.dependence_probability_pairwise()
pu.plot_clustermap(D)
def render_states_to_disk(filepath, prefix):
engine = Engine.from_pickle(filepath)
for i in range(engine.num_states()):
print '\r%d' % (i,)
savefile = '%s-%d' % (prefix, i)
state = engine.get_state(i)
ru.viz_state(
state, row_names=animal_names, col_names=animal_features,
savefile=savefile)
# render_states_to_disk(
# 'resources/animals/animals-normal.engine',
# 'resources/animals/normal/cc-normal')
# render_states_to_disk(
# 'resources/animals/animals-normal-lovecat.engine',
# 'resources/animals/normal/lv-normal')
|
Add the animals testing harness.# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
from cgpm.crosscat.engine import Engine
from cgpm.utils import general as gu
from cgpm.utils import plots as pu
from cgpm.utils import test as tu
from cgpm.utils import render_utils as ru
animals = pd.read_csv('resources/animals/animals.csv', index_col=0)
animal_values = animals.values
animal_names = animals.index.values
animal_features = animals.columns.values
def launch_analysis():
engine = Engine(
animals.values.astype(float),
num_states=64,
cctypes=['categorical']*len(animals.values[0]),
distargs=[{'k':2}]*len(animals.values[0]),
rng=gu.gen_rng(7))
engine.transition(N=900)
with open('resources/animals/animals.engine', 'w') as f:
engine.to_pickle(f)
engine = Engine.from_pickle(open('resources/animals/animals.engine','r'))
D = engine.dependence_probability_pairwise()
pu.plot_clustermap(D)
def render_states_to_disk(filepath, prefix):
engine = Engine.from_pickle(filepath)
for i in range(engine.num_states()):
print '\r%d' % (i,)
savefile = '%s-%d' % (prefix, i)
state = engine.get_state(i)
ru.viz_state(
state, row_names=animal_names, col_names=animal_features,
savefile=savefile)
# render_states_to_disk(
# 'resources/animals/animals-normal.engine',
# 'resources/animals/normal/cc-normal')
# render_states_to_disk(
# 'resources/animals/animals-normal-lovecat.engine',
# 'resources/animals/normal/lv-normal')
|
<commit_before><commit_msg>Add the animals testing harness.<commit_after># -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
from cgpm.crosscat.engine import Engine
from cgpm.utils import general as gu
from cgpm.utils import plots as pu
from cgpm.utils import test as tu
from cgpm.utils import render_utils as ru
animals = pd.read_csv('resources/animals/animals.csv', index_col=0)
animal_values = animals.values
animal_names = animals.index.values
animal_features = animals.columns.values
def launch_analysis():
engine = Engine(
animals.values.astype(float),
num_states=64,
cctypes=['categorical']*len(animals.values[0]),
distargs=[{'k':2}]*len(animals.values[0]),
rng=gu.gen_rng(7))
engine.transition(N=900)
with open('resources/animals/animals.engine', 'w') as f:
engine.to_pickle(f)
engine = Engine.from_pickle(open('resources/animals/animals.engine','r'))
D = engine.dependence_probability_pairwise()
pu.plot_clustermap(D)
def render_states_to_disk(filepath, prefix):
engine = Engine.from_pickle(filepath)
for i in range(engine.num_states()):
print '\r%d' % (i,)
savefile = '%s-%d' % (prefix, i)
state = engine.get_state(i)
ru.viz_state(
state, row_names=animal_names, col_names=animal_features,
savefile=savefile)
# render_states_to_disk(
# 'resources/animals/animals-normal.engine',
# 'resources/animals/normal/cc-normal')
# render_states_to_disk(
# 'resources/animals/animals-normal-lovecat.engine',
# 'resources/animals/normal/lv-normal')
|
|
8e744f67457222a143505f303c0254e43706d3f8
|
tests/aggregate/test_join_table_inheritance.py
|
tests/aggregate/test_join_table_inheritance.py
|
from decimal import Decimal
import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestLazyEvaluatedSelectExpressionsForAggregates(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Catalog(self.Base):
__tablename__ = 'catalog'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
type = sa.Column(sa.Unicode(255))
__mapper_args__ = {
'polymorphic_on': type
}
@aggregated('products', sa.Column(sa.Numeric, default=0))
def net_worth(self):
return sa.func.sum(Product.price)
products = sa.orm.relationship('Product', backref='catalog')
class CostumeCatalog(Catalog):
__tablename__ = 'costume_catalog'
id = sa.Column(
sa.Integer, sa.ForeignKey(Catalog.id), primary_key=True
)
__mapper_args__ = {
'polymorphic_identity': 'costumes',
}
class CarCatalog(Catalog):
__tablename__ = 'car_catalog'
id = sa.Column(
sa.Integer, sa.ForeignKey(Catalog.id), primary_key=True
)
__mapper_args__ = {
'polymorphic_identity': 'cars',
}
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
price = sa.Column(sa.Numeric)
catalog_id = sa.Column(sa.Integer, sa.ForeignKey('catalog.id'))
self.Catalog = Catalog
self.Product = Product
def test_assigns_aggregates_on_insert(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Some product',
price=Decimal('1000'),
catalog=catalog
)
self.session.add(product)
self.session.commit()
self.session.refresh(catalog)
assert catalog.net_worth == Decimal('1000')
def test_assigns_aggregates_on_update(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Some product',
price=Decimal('1000'),
catalog=catalog
)
self.session.add(product)
self.session.commit()
product.price = Decimal('500')
self.session.commit()
self.session.refresh(catalog)
assert catalog.net_worth == Decimal('500')
|
Add tests for aggregates with inheritance
|
Add tests for aggregates with inheritance
|
Python
|
bsd-3-clause
|
joshfriend/sqlalchemy-utils,marrybird/sqlalchemy-utils,konstantinoskostis/sqlalchemy-utils,rmoorman/sqlalchemy-utils,tonyseek/sqlalchemy-utils,joshfriend/sqlalchemy-utils,tonyseek/sqlalchemy-utils,cheungpat/sqlalchemy-utils,spoqa/sqlalchemy-utils,JackWink/sqlalchemy-utils
|
Add tests for aggregates with inheritance
|
from decimal import Decimal
import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestLazyEvaluatedSelectExpressionsForAggregates(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Catalog(self.Base):
__tablename__ = 'catalog'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
type = sa.Column(sa.Unicode(255))
__mapper_args__ = {
'polymorphic_on': type
}
@aggregated('products', sa.Column(sa.Numeric, default=0))
def net_worth(self):
return sa.func.sum(Product.price)
products = sa.orm.relationship('Product', backref='catalog')
class CostumeCatalog(Catalog):
__tablename__ = 'costume_catalog'
id = sa.Column(
sa.Integer, sa.ForeignKey(Catalog.id), primary_key=True
)
__mapper_args__ = {
'polymorphic_identity': 'costumes',
}
class CarCatalog(Catalog):
__tablename__ = 'car_catalog'
id = sa.Column(
sa.Integer, sa.ForeignKey(Catalog.id), primary_key=True
)
__mapper_args__ = {
'polymorphic_identity': 'cars',
}
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
price = sa.Column(sa.Numeric)
catalog_id = sa.Column(sa.Integer, sa.ForeignKey('catalog.id'))
self.Catalog = Catalog
self.Product = Product
def test_assigns_aggregates_on_insert(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Some product',
price=Decimal('1000'),
catalog=catalog
)
self.session.add(product)
self.session.commit()
self.session.refresh(catalog)
assert catalog.net_worth == Decimal('1000')
def test_assigns_aggregates_on_update(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Some product',
price=Decimal('1000'),
catalog=catalog
)
self.session.add(product)
self.session.commit()
product.price = Decimal('500')
self.session.commit()
self.session.refresh(catalog)
assert catalog.net_worth == Decimal('500')
|
<commit_before><commit_msg>Add tests for aggregates with inheritance<commit_after>
|
from decimal import Decimal
import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestLazyEvaluatedSelectExpressionsForAggregates(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Catalog(self.Base):
__tablename__ = 'catalog'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
type = sa.Column(sa.Unicode(255))
__mapper_args__ = {
'polymorphic_on': type
}
@aggregated('products', sa.Column(sa.Numeric, default=0))
def net_worth(self):
return sa.func.sum(Product.price)
products = sa.orm.relationship('Product', backref='catalog')
class CostumeCatalog(Catalog):
__tablename__ = 'costume_catalog'
id = sa.Column(
sa.Integer, sa.ForeignKey(Catalog.id), primary_key=True
)
__mapper_args__ = {
'polymorphic_identity': 'costumes',
}
class CarCatalog(Catalog):
__tablename__ = 'car_catalog'
id = sa.Column(
sa.Integer, sa.ForeignKey(Catalog.id), primary_key=True
)
__mapper_args__ = {
'polymorphic_identity': 'cars',
}
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
price = sa.Column(sa.Numeric)
catalog_id = sa.Column(sa.Integer, sa.ForeignKey('catalog.id'))
self.Catalog = Catalog
self.Product = Product
def test_assigns_aggregates_on_insert(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Some product',
price=Decimal('1000'),
catalog=catalog
)
self.session.add(product)
self.session.commit()
self.session.refresh(catalog)
assert catalog.net_worth == Decimal('1000')
def test_assigns_aggregates_on_update(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Some product',
price=Decimal('1000'),
catalog=catalog
)
self.session.add(product)
self.session.commit()
product.price = Decimal('500')
self.session.commit()
self.session.refresh(catalog)
assert catalog.net_worth == Decimal('500')
|
Add tests for aggregates with inheritancefrom decimal import Decimal
import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestLazyEvaluatedSelectExpressionsForAggregates(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Catalog(self.Base):
__tablename__ = 'catalog'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
type = sa.Column(sa.Unicode(255))
__mapper_args__ = {
'polymorphic_on': type
}
@aggregated('products', sa.Column(sa.Numeric, default=0))
def net_worth(self):
return sa.func.sum(Product.price)
products = sa.orm.relationship('Product', backref='catalog')
class CostumeCatalog(Catalog):
__tablename__ = 'costume_catalog'
id = sa.Column(
sa.Integer, sa.ForeignKey(Catalog.id), primary_key=True
)
__mapper_args__ = {
'polymorphic_identity': 'costumes',
}
class CarCatalog(Catalog):
__tablename__ = 'car_catalog'
id = sa.Column(
sa.Integer, sa.ForeignKey(Catalog.id), primary_key=True
)
__mapper_args__ = {
'polymorphic_identity': 'cars',
}
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
price = sa.Column(sa.Numeric)
catalog_id = sa.Column(sa.Integer, sa.ForeignKey('catalog.id'))
self.Catalog = Catalog
self.Product = Product
def test_assigns_aggregates_on_insert(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Some product',
price=Decimal('1000'),
catalog=catalog
)
self.session.add(product)
self.session.commit()
self.session.refresh(catalog)
assert catalog.net_worth == Decimal('1000')
def test_assigns_aggregates_on_update(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Some product',
price=Decimal('1000'),
catalog=catalog
)
self.session.add(product)
self.session.commit()
product.price = Decimal('500')
self.session.commit()
self.session.refresh(catalog)
assert catalog.net_worth == Decimal('500')
|
<commit_before><commit_msg>Add tests for aggregates with inheritance<commit_after>from decimal import Decimal
import sqlalchemy as sa
from sqlalchemy_utils.aggregates import aggregated
from tests import TestCase
class TestLazyEvaluatedSelectExpressionsForAggregates(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Catalog(self.Base):
__tablename__ = 'catalog'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
type = sa.Column(sa.Unicode(255))
__mapper_args__ = {
'polymorphic_on': type
}
@aggregated('products', sa.Column(sa.Numeric, default=0))
def net_worth(self):
return sa.func.sum(Product.price)
products = sa.orm.relationship('Product', backref='catalog')
class CostumeCatalog(Catalog):
__tablename__ = 'costume_catalog'
id = sa.Column(
sa.Integer, sa.ForeignKey(Catalog.id), primary_key=True
)
__mapper_args__ = {
'polymorphic_identity': 'costumes',
}
class CarCatalog(Catalog):
__tablename__ = 'car_catalog'
id = sa.Column(
sa.Integer, sa.ForeignKey(Catalog.id), primary_key=True
)
__mapper_args__ = {
'polymorphic_identity': 'cars',
}
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
price = sa.Column(sa.Numeric)
catalog_id = sa.Column(sa.Integer, sa.ForeignKey('catalog.id'))
self.Catalog = Catalog
self.Product = Product
def test_assigns_aggregates_on_insert(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Some product',
price=Decimal('1000'),
catalog=catalog
)
self.session.add(product)
self.session.commit()
self.session.refresh(catalog)
assert catalog.net_worth == Decimal('1000')
def test_assigns_aggregates_on_update(self):
catalog = self.Catalog(
name=u'Some catalog'
)
self.session.add(catalog)
self.session.commit()
product = self.Product(
name=u'Some product',
price=Decimal('1000'),
catalog=catalog
)
self.session.add(product)
self.session.commit()
product.price = Decimal('500')
self.session.commit()
self.session.refresh(catalog)
assert catalog.net_worth == Decimal('500')
|
|
54a3be9039292f33d8d29a749353a92fca6cc1c9
|
tests/__init__.py
|
tests/__init__.py
|
#TODO: REMOVE COMMENTS ONCE BELOW TESTS ARE CONVERTED TO THE UNITTEST FRAMEWORK
#from .test_dataset import *
from .test_pandas_dataset import *
#from .test_great_expectations import *
#from .test_util import *
|
Add default module for tests to support automatically running all unit tests, with associated scaffold for when other tests are converted.
|
Add default module for tests to support automatically running all unit tests, with associated scaffold for when other tests are converted.
|
Python
|
apache-2.0
|
great-expectations/great_expectations,great-expectations/great_expectations,great-expectations/great_expectations,great-expectations/great_expectations
|
Add default module for tests to support automatically running all unit tests, with associated scaffold for when other tests are converted.
|
#TODO: REMOVE COMMENTS ONCE BELOW TESTS ARE CONVERTED TO THE UNITTEST FRAMEWORK
#from .test_dataset import *
from .test_pandas_dataset import *
#from .test_great_expectations import *
#from .test_util import *
|
<commit_before><commit_msg>Add default module for tests to support automatically running all unit tests, with associated scaffold for when other tests are converted.<commit_after>
|
#TODO: REMOVE COMMENTS ONCE BELOW TESTS ARE CONVERTED TO THE UNITTEST FRAMEWORK
#from .test_dataset import *
from .test_pandas_dataset import *
#from .test_great_expectations import *
#from .test_util import *
|
Add default module for tests to support automatically running all unit tests, with associated scaffold for when other tests are converted.#TODO: REMOVE COMMENTS ONCE BELOW TESTS ARE CONVERTED TO THE UNITTEST FRAMEWORK
#from .test_dataset import *
from .test_pandas_dataset import *
#from .test_great_expectations import *
#from .test_util import *
|
<commit_before><commit_msg>Add default module for tests to support automatically running all unit tests, with associated scaffold for when other tests are converted.<commit_after>#TODO: REMOVE COMMENTS ONCE BELOW TESTS ARE CONVERTED TO THE UNITTEST FRAMEWORK
#from .test_dataset import *
from .test_pandas_dataset import *
#from .test_great_expectations import *
#from .test_util import *
|
|
c047804ec995884794afc26fd57872becbe8686f
|
tests/test_git.py
|
tests/test_git.py
|
from subprocess import check_call
from valohai_cli.git import get_current_commit
def test_get_current_commit(tmpdir):
dir = str(tmpdir)
check_call('git init', cwd=dir, shell=True)
check_call('git config user.name Robot', cwd=dir, shell=True)
check_call('git config user.email robot@example.com', cwd=dir, shell=True)
tmpdir.join('test').write_text('test', 'utf8')
check_call('git add .', cwd=dir, shell=True)
check_call('git commit -mtest', cwd=dir, shell=True)
assert len(get_current_commit(dir)) == 40
|
Add a test for get_current_commit
|
Add a test for get_current_commit
|
Python
|
mit
|
valohai/valohai-cli
|
Add a test for get_current_commit
|
from subprocess import check_call
from valohai_cli.git import get_current_commit
def test_get_current_commit(tmpdir):
dir = str(tmpdir)
check_call('git init', cwd=dir, shell=True)
check_call('git config user.name Robot', cwd=dir, shell=True)
check_call('git config user.email robot@example.com', cwd=dir, shell=True)
tmpdir.join('test').write_text('test', 'utf8')
check_call('git add .', cwd=dir, shell=True)
check_call('git commit -mtest', cwd=dir, shell=True)
assert len(get_current_commit(dir)) == 40
|
<commit_before><commit_msg>Add a test for get_current_commit<commit_after>
|
from subprocess import check_call
from valohai_cli.git import get_current_commit
def test_get_current_commit(tmpdir):
dir = str(tmpdir)
check_call('git init', cwd=dir, shell=True)
check_call('git config user.name Robot', cwd=dir, shell=True)
check_call('git config user.email robot@example.com', cwd=dir, shell=True)
tmpdir.join('test').write_text('test', 'utf8')
check_call('git add .', cwd=dir, shell=True)
check_call('git commit -mtest', cwd=dir, shell=True)
assert len(get_current_commit(dir)) == 40
|
Add a test for get_current_commitfrom subprocess import check_call
from valohai_cli.git import get_current_commit
def test_get_current_commit(tmpdir):
dir = str(tmpdir)
check_call('git init', cwd=dir, shell=True)
check_call('git config user.name Robot', cwd=dir, shell=True)
check_call('git config user.email robot@example.com', cwd=dir, shell=True)
tmpdir.join('test').write_text('test', 'utf8')
check_call('git add .', cwd=dir, shell=True)
check_call('git commit -mtest', cwd=dir, shell=True)
assert len(get_current_commit(dir)) == 40
|
<commit_before><commit_msg>Add a test for get_current_commit<commit_after>from subprocess import check_call
from valohai_cli.git import get_current_commit
def test_get_current_commit(tmpdir):
dir = str(tmpdir)
check_call('git init', cwd=dir, shell=True)
check_call('git config user.name Robot', cwd=dir, shell=True)
check_call('git config user.email robot@example.com', cwd=dir, shell=True)
tmpdir.join('test').write_text('test', 'utf8')
check_call('git add .', cwd=dir, shell=True)
check_call('git commit -mtest', cwd=dir, shell=True)
assert len(get_current_commit(dir)) == 40
|
|
1c2e7d773efa8e015bfa964fefe4c50c3cd9ac46
|
src/lib/arcgis_data_source.py
|
src/lib/arcgis_data_source.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
from pathlib import Path
from typing import Any, Callable, Dict, List
import requests
from pandas import DataFrame
from lib.data_source import DataSource
def _download_arcgis(
url: str, offset: int = 0, log_func: Callable[[str], None] = None
) -> List[Dict[str, Any]]:
"""
Recursively download all records from an ArcGIS data source respecting the maximum record
transfer per request.
"""
url_tpl = url + "&resultOffset={offset}"
try:
res = requests.get(url_tpl.format(offset=offset)).json()["features"]
except Exception as exc:
if log_func:
log_func(requests.get(url_tpl.format(offset=offset)).text)
raise exc
rows = [row["attributes"] for row in res]
if len(rows) == 0:
return rows
else:
return rows + _download_arcgis(url, offset=offset + len(rows))
class ArcGISDataSource(DataSource):
def fetch(
self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]
) -> Dict[str, str]:
downloaded_files = {}
for idx, opts in enumerate(fetch_opts):
# Base URL comes from fetch_opts
url_base = opts["url"]
# Create a deterministic file name
file_path = (
output_folder
/ "snapshot"
/ ("%s.%s" % (uuid.uuid5(uuid.NAMESPACE_DNS, url_base), "json"))
)
# Avoid download if the file exists and flag is set
skip_existing = opts.get("opts", {}).get("skip_existing")
if not skip_existing or not file_path.exists():
with open(file_path, "w") as fd:
json.dump({"features": _download_arcgis(url_base)}, fd)
# Add downloaded file to the list
downloaded_files[opts.get("name", idx)] = str(file_path.absolute())
return downloaded_files
def parse(self, sources: Dict[str, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
dataframes = {}
for name, file_path in sources.items():
with open(file_path, "r") as fd:
records = json.load(fd)["features"]
dataframes[name] = DataFrame.from_records(records)
return self.parse_dataframes(dataframes, aux, **parse_opts)
|
Add ArcGIS data source util
|
Add ArcGIS data source util
|
Python
|
apache-2.0
|
GoogleCloudPlatform/covid-19-open-data,GoogleCloudPlatform/covid-19-open-data
|
Add ArcGIS data source util
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
from pathlib import Path
from typing import Any, Callable, Dict, List
import requests
from pandas import DataFrame
from lib.data_source import DataSource
def _download_arcgis(
url: str, offset: int = 0, log_func: Callable[[str], None] = None
) -> List[Dict[str, Any]]:
"""
Recursively download all records from an ArcGIS data source respecting the maximum record
transfer per request.
"""
url_tpl = url + "&resultOffset={offset}"
try:
res = requests.get(url_tpl.format(offset=offset)).json()["features"]
except Exception as exc:
if log_func:
log_func(requests.get(url_tpl.format(offset=offset)).text)
raise exc
rows = [row["attributes"] for row in res]
if len(rows) == 0:
return rows
else:
return rows + _download_arcgis(url, offset=offset + len(rows))
class ArcGISDataSource(DataSource):
def fetch(
self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]
) -> Dict[str, str]:
downloaded_files = {}
for idx, opts in enumerate(fetch_opts):
# Base URL comes from fetch_opts
url_base = opts["url"]
# Create a deterministic file name
file_path = (
output_folder
/ "snapshot"
/ ("%s.%s" % (uuid.uuid5(uuid.NAMESPACE_DNS, url_base), "json"))
)
# Avoid download if the file exists and flag is set
skip_existing = opts.get("opts", {}).get("skip_existing")
if not skip_existing or not file_path.exists():
with open(file_path, "w") as fd:
json.dump({"features": _download_arcgis(url_base)}, fd)
# Add downloaded file to the list
downloaded_files[opts.get("name", idx)] = str(file_path.absolute())
return downloaded_files
def parse(self, sources: Dict[str, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
dataframes = {}
for name, file_path in sources.items():
with open(file_path, "r") as fd:
records = json.load(fd)["features"]
dataframes[name] = DataFrame.from_records(records)
return self.parse_dataframes(dataframes, aux, **parse_opts)
|
<commit_before><commit_msg>Add ArcGIS data source util<commit_after>
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
from pathlib import Path
from typing import Any, Callable, Dict, List
import requests
from pandas import DataFrame
from lib.data_source import DataSource
def _download_arcgis(
url: str, offset: int = 0, log_func: Callable[[str], None] = None
) -> List[Dict[str, Any]]:
"""
Recursively download all records from an ArcGIS data source respecting the maximum record
transfer per request.
"""
url_tpl = url + "&resultOffset={offset}"
try:
res = requests.get(url_tpl.format(offset=offset)).json()["features"]
except Exception as exc:
if log_func:
log_func(requests.get(url_tpl.format(offset=offset)).text)
raise exc
rows = [row["attributes"] for row in res]
if len(rows) == 0:
return rows
else:
return rows + _download_arcgis(url, offset=offset + len(rows))
class ArcGISDataSource(DataSource):
def fetch(
self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]
) -> Dict[str, str]:
downloaded_files = {}
for idx, opts in enumerate(fetch_opts):
# Base URL comes from fetch_opts
url_base = opts["url"]
# Create a deterministic file name
file_path = (
output_folder
/ "snapshot"
/ ("%s.%s" % (uuid.uuid5(uuid.NAMESPACE_DNS, url_base), "json"))
)
# Avoid download if the file exists and flag is set
skip_existing = opts.get("opts", {}).get("skip_existing")
if not skip_existing or not file_path.exists():
with open(file_path, "w") as fd:
json.dump({"features": _download_arcgis(url_base)}, fd)
# Add downloaded file to the list
downloaded_files[opts.get("name", idx)] = str(file_path.absolute())
return downloaded_files
def parse(self, sources: Dict[str, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
dataframes = {}
for name, file_path in sources.items():
with open(file_path, "r") as fd:
records = json.load(fd)["features"]
dataframes[name] = DataFrame.from_records(records)
return self.parse_dataframes(dataframes, aux, **parse_opts)
|
Add ArcGIS data source util# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
from pathlib import Path
from typing import Any, Callable, Dict, List
import requests
from pandas import DataFrame
from lib.data_source import DataSource
def _download_arcgis(
url: str, offset: int = 0, log_func: Callable[[str], None] = None
) -> List[Dict[str, Any]]:
"""
Recursively download all records from an ArcGIS data source respecting the maximum record
transfer per request.
"""
url_tpl = url + "&resultOffset={offset}"
try:
res = requests.get(url_tpl.format(offset=offset)).json()["features"]
except Exception as exc:
if log_func:
log_func(requests.get(url_tpl.format(offset=offset)).text)
raise exc
rows = [row["attributes"] for row in res]
if len(rows) == 0:
return rows
else:
return rows + _download_arcgis(url, offset=offset + len(rows))
class ArcGISDataSource(DataSource):
def fetch(
self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]
) -> Dict[str, str]:
downloaded_files = {}
for idx, opts in enumerate(fetch_opts):
# Base URL comes from fetch_opts
url_base = opts["url"]
# Create a deterministic file name
file_path = (
output_folder
/ "snapshot"
/ ("%s.%s" % (uuid.uuid5(uuid.NAMESPACE_DNS, url_base), "json"))
)
# Avoid download if the file exists and flag is set
skip_existing = opts.get("opts", {}).get("skip_existing")
if not skip_existing or not file_path.exists():
with open(file_path, "w") as fd:
json.dump({"features": _download_arcgis(url_base)}, fd)
# Add downloaded file to the list
downloaded_files[opts.get("name", idx)] = str(file_path.absolute())
return downloaded_files
def parse(self, sources: Dict[str, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
dataframes = {}
for name, file_path in sources.items():
with open(file_path, "r") as fd:
records = json.load(fd)["features"]
dataframes[name] = DataFrame.from_records(records)
return self.parse_dataframes(dataframes, aux, **parse_opts)
|
<commit_before><commit_msg>Add ArcGIS data source util<commit_after># Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
from pathlib import Path
from typing import Any, Callable, Dict, List
import requests
from pandas import DataFrame
from lib.data_source import DataSource
def _download_arcgis(
url: str, offset: int = 0, log_func: Callable[[str], None] = None
) -> List[Dict[str, Any]]:
"""
Recursively download all records from an ArcGIS data source respecting the maximum record
transfer per request.
"""
url_tpl = url + "&resultOffset={offset}"
try:
res = requests.get(url_tpl.format(offset=offset)).json()["features"]
except Exception as exc:
if log_func:
log_func(requests.get(url_tpl.format(offset=offset)).text)
raise exc
rows = [row["attributes"] for row in res]
if len(rows) == 0:
return rows
else:
return rows + _download_arcgis(url, offset=offset + len(rows))
class ArcGISDataSource(DataSource):
def fetch(
self, output_folder: Path, cache: Dict[str, str], fetch_opts: List[Dict[str, Any]]
) -> Dict[str, str]:
downloaded_files = {}
for idx, opts in enumerate(fetch_opts):
# Base URL comes from fetch_opts
url_base = opts["url"]
# Create a deterministic file name
file_path = (
output_folder
/ "snapshot"
/ ("%s.%s" % (uuid.uuid5(uuid.NAMESPACE_DNS, url_base), "json"))
)
# Avoid download if the file exists and flag is set
skip_existing = opts.get("opts", {}).get("skip_existing")
if not skip_existing or not file_path.exists():
with open(file_path, "w") as fd:
json.dump({"features": _download_arcgis(url_base)}, fd)
# Add downloaded file to the list
downloaded_files[opts.get("name", idx)] = str(file_path.absolute())
return downloaded_files
def parse(self, sources: Dict[str, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
dataframes = {}
for name, file_path in sources.items():
with open(file_path, "r") as fd:
records = json.load(fd)["features"]
dataframes[name] = DataFrame.from_records(records)
return self.parse_dataframes(dataframes, aux, **parse_opts)
|
|
396d8f6774ad4d75e15ae13481c04e9c9241204d
|
writer/kafka_sample_writer.py
|
writer/kafka_sample_writer.py
|
from kafka import KafkaClient, create_message
from kafka.protocol import KafkaProtocol
from kafka.common import ProduceRequest
import random
import logging
class KafkaSampleWriter(object):
"""
KafkaSampleWriter can be used to write sample messages into Kafka for
benchmark purposes
"""
def __init__(self, config, batches = 1000, batch_size = 1000):
self.config = config
self.batches = batches
self.batch_size = batch_size
# Sample messages for benchmark
self.sample_messages = [
"""26f2fc918f50.load.load.shortterm 0.05 1436357630
26f2fc918f50.load.load.midterm 0.05 1436357630
26f2fc918f50.load.load.longterm 0.05 1436357630""",
"26f2fc918f50.cpu-0.cpu-user 30364 1436357630",
"26f2fc918f50.memory.memory-buffered 743657472 1436357630"
]
def produce_messages(self):
"""
Produce sample messages
"""
# TODO: Support different kafka port
kafka = KafkaClient(self.config.kafka_host)
total_messages = self.batches * self.batch_size
messages_batch = [create_message(random.choice(self.sample_messages)) for r in range(self.batch_size)]
for i in range(self.batches):
req = ProduceRequest(topic=self.config.kafka_topic, partition=0, messages=messages_batch)
resps = kafka.send_produce_request(payloads=[req], fail_on_error=True)
sent_messages = i * self.batch_size
logging.info('Created {} out of {} sample messages'.format(sent_messages, total_messages))
kafka.close()
|
Add kafka writer for benchmarks
|
Add kafka writer for benchmarks
|
Python
|
apache-2.0
|
mre/kafka-influxdb,mre/kafka-influxdb
|
Add kafka writer for benchmarks
|
from kafka import KafkaClient, create_message
from kafka.protocol import KafkaProtocol
from kafka.common import ProduceRequest
import random
import logging
class KafkaSampleWriter(object):
"""
KafkaSampleWriter can be used to write sample messages into Kafka for
benchmark purposes
"""
def __init__(self, config, batches = 1000, batch_size = 1000):
self.config = config
self.batches = batches
self.batch_size = batch_size
# Sample messages for benchmark
self.sample_messages = [
"""26f2fc918f50.load.load.shortterm 0.05 1436357630
26f2fc918f50.load.load.midterm 0.05 1436357630
26f2fc918f50.load.load.longterm 0.05 1436357630""",
"26f2fc918f50.cpu-0.cpu-user 30364 1436357630",
"26f2fc918f50.memory.memory-buffered 743657472 1436357630"
]
def produce_messages(self):
"""
Produce sample messages
"""
# TODO: Support different kafka port
kafka = KafkaClient(self.config.kafka_host)
total_messages = self.batches * self.batch_size
messages_batch = [create_message(random.choice(self.sample_messages)) for r in range(self.batch_size)]
for i in range(self.batches):
req = ProduceRequest(topic=self.config.kafka_topic, partition=0, messages=messages_batch)
resps = kafka.send_produce_request(payloads=[req], fail_on_error=True)
sent_messages = i * self.batch_size
logging.info('Created {} out of {} sample messages'.format(sent_messages, total_messages))
kafka.close()
|
<commit_before><commit_msg>Add kafka writer for benchmarks<commit_after>
|
from kafka import KafkaClient, create_message
from kafka.protocol import KafkaProtocol
from kafka.common import ProduceRequest
import random
import logging
class KafkaSampleWriter(object):
"""
KafkaSampleWriter can be used to write sample messages into Kafka for
benchmark purposes
"""
def __init__(self, config, batches = 1000, batch_size = 1000):
self.config = config
self.batches = batches
self.batch_size = batch_size
# Sample messages for benchmark
self.sample_messages = [
"""26f2fc918f50.load.load.shortterm 0.05 1436357630
26f2fc918f50.load.load.midterm 0.05 1436357630
26f2fc918f50.load.load.longterm 0.05 1436357630""",
"26f2fc918f50.cpu-0.cpu-user 30364 1436357630",
"26f2fc918f50.memory.memory-buffered 743657472 1436357630"
]
def produce_messages(self):
"""
Produce sample messages
"""
# TODO: Support different kafka port
kafka = KafkaClient(self.config.kafka_host)
total_messages = self.batches * self.batch_size
messages_batch = [create_message(random.choice(self.sample_messages)) for r in range(self.batch_size)]
for i in range(self.batches):
req = ProduceRequest(topic=self.config.kafka_topic, partition=0, messages=messages_batch)
resps = kafka.send_produce_request(payloads=[req], fail_on_error=True)
sent_messages = i * self.batch_size
logging.info('Created {} out of {} sample messages'.format(sent_messages, total_messages))
kafka.close()
|
Add kafka writer for benchmarksfrom kafka import KafkaClient, create_message
from kafka.protocol import KafkaProtocol
from kafka.common import ProduceRequest
import random
import logging
class KafkaSampleWriter(object):
"""
KafkaSampleWriter can be used to write sample messages into Kafka for
benchmark purposes
"""
def __init__(self, config, batches = 1000, batch_size = 1000):
self.config = config
self.batches = batches
self.batch_size = batch_size
# Sample messages for benchmark
self.sample_messages = [
"""26f2fc918f50.load.load.shortterm 0.05 1436357630
26f2fc918f50.load.load.midterm 0.05 1436357630
26f2fc918f50.load.load.longterm 0.05 1436357630""",
"26f2fc918f50.cpu-0.cpu-user 30364 1436357630",
"26f2fc918f50.memory.memory-buffered 743657472 1436357630"
]
def produce_messages(self):
"""
Produce sample messages
"""
# TODO: Support different kafka port
kafka = KafkaClient(self.config.kafka_host)
total_messages = self.batches * self.batch_size
messages_batch = [create_message(random.choice(self.sample_messages)) for r in range(self.batch_size)]
for i in range(self.batches):
req = ProduceRequest(topic=self.config.kafka_topic, partition=0, messages=messages_batch)
resps = kafka.send_produce_request(payloads=[req], fail_on_error=True)
sent_messages = i * self.batch_size
logging.info('Created {} out of {} sample messages'.format(sent_messages, total_messages))
kafka.close()
|
<commit_before><commit_msg>Add kafka writer for benchmarks<commit_after>from kafka import KafkaClient, create_message
from kafka.protocol import KafkaProtocol
from kafka.common import ProduceRequest
import random
import logging
class KafkaSampleWriter(object):
"""
KafkaSampleWriter can be used to write sample messages into Kafka for
benchmark purposes
"""
def __init__(self, config, batches = 1000, batch_size = 1000):
self.config = config
self.batches = batches
self.batch_size = batch_size
# Sample messages for benchmark
self.sample_messages = [
"""26f2fc918f50.load.load.shortterm 0.05 1436357630
26f2fc918f50.load.load.midterm 0.05 1436357630
26f2fc918f50.load.load.longterm 0.05 1436357630""",
"26f2fc918f50.cpu-0.cpu-user 30364 1436357630",
"26f2fc918f50.memory.memory-buffered 743657472 1436357630"
]
def produce_messages(self):
"""
Produce sample messages
"""
# TODO: Support different kafka port
kafka = KafkaClient(self.config.kafka_host)
total_messages = self.batches * self.batch_size
messages_batch = [create_message(random.choice(self.sample_messages)) for r in range(self.batch_size)]
for i in range(self.batches):
req = ProduceRequest(topic=self.config.kafka_topic, partition=0, messages=messages_batch)
resps = kafka.send_produce_request(payloads=[req], fail_on_error=True)
sent_messages = i * self.batch_size
logging.info('Created {} out of {} sample messages'.format(sent_messages, total_messages))
kafka.close()
|
|
31357d68a7d0fa473ef518e28f239cd2a8b1cb5d
|
seaborn/tests/test_miscplot.py
|
seaborn/tests/test_miscplot.py
|
import nose.tools as nt
import numpy.testing as npt
import matplotlib.pyplot as plt
from .. import miscplot as misc
from seaborn import color_palette
class TestPalPlot(object):
"""Test the function that visualizes a color palette."""
def test_palplot_size(self):
pal4 = color_palette("husl", 4)
misc.palplot(pal4)
size4 = plt.gcf().get_size_inches()
nt.assert_equal(tuple(size4), (4, 1))
pal5 = color_palette("husl", 5)
misc.palplot(pal5)
size5 = plt.gcf().get_size_inches()
nt.assert_equal(tuple(size5), (5, 1))
palbig = color_palette("husl", 3)
misc.palplot(palbig, 2)
sizebig = plt.gcf().get_size_inches()
nt.assert_equal(tuple(sizebig), (6, 2))
plt.close("all")
|
Add simple test for palplot
|
Add simple test for palplot
|
Python
|
bsd-3-clause
|
bsipocz/seaborn,ashhher3/seaborn,petebachant/seaborn,wrobstory/seaborn,mia1rab/seaborn,parantapa/seaborn,mwaskom/seaborn,ebothmann/seaborn,q1ang/seaborn,nileracecrew/seaborn,mclevey/seaborn,anntzer/seaborn,ischwabacher/seaborn,cwu2011/seaborn,dotsdl/seaborn,sauliusl/seaborn,clarkfitzg/seaborn,lukauskas/seaborn,aashish24/seaborn,drewokane/seaborn,phobson/seaborn,tim777z/seaborn,jat255/seaborn,uhjish/seaborn,jakevdp/seaborn,anntzer/seaborn,gef756/seaborn,dhimmel/seaborn,Guokr1991/seaborn,kyleam/seaborn,mwaskom/seaborn,sinhrks/seaborn,yarikoptic/seaborn,aashish24/seaborn,arokem/seaborn,oesteban/seaborn,Lx37/seaborn,huongttlan/seaborn,lukauskas/seaborn,dimarkov/seaborn,JWarmenhoven/seaborn,arokem/seaborn,phobson/seaborn,muku42/seaborn,lypzln/seaborn,olgabot/seaborn
|
Add simple test for palplot
|
import nose.tools as nt
import numpy.testing as npt
import matplotlib.pyplot as plt
from .. import miscplot as misc
from seaborn import color_palette
class TestPalPlot(object):
"""Test the function that visualizes a color palette."""
def test_palplot_size(self):
pal4 = color_palette("husl", 4)
misc.palplot(pal4)
size4 = plt.gcf().get_size_inches()
nt.assert_equal(tuple(size4), (4, 1))
pal5 = color_palette("husl", 5)
misc.palplot(pal5)
size5 = plt.gcf().get_size_inches()
nt.assert_equal(tuple(size5), (5, 1))
palbig = color_palette("husl", 3)
misc.palplot(palbig, 2)
sizebig = plt.gcf().get_size_inches()
nt.assert_equal(tuple(sizebig), (6, 2))
plt.close("all")
|
<commit_before><commit_msg>Add simple test for palplot<commit_after>
|
import nose.tools as nt
import numpy.testing as npt
import matplotlib.pyplot as plt
from .. import miscplot as misc
from seaborn import color_palette
class TestPalPlot(object):
"""Test the function that visualizes a color palette."""
def test_palplot_size(self):
pal4 = color_palette("husl", 4)
misc.palplot(pal4)
size4 = plt.gcf().get_size_inches()
nt.assert_equal(tuple(size4), (4, 1))
pal5 = color_palette("husl", 5)
misc.palplot(pal5)
size5 = plt.gcf().get_size_inches()
nt.assert_equal(tuple(size5), (5, 1))
palbig = color_palette("husl", 3)
misc.palplot(palbig, 2)
sizebig = plt.gcf().get_size_inches()
nt.assert_equal(tuple(sizebig), (6, 2))
plt.close("all")
|
Add simple test for palplotimport nose.tools as nt
import numpy.testing as npt
import matplotlib.pyplot as plt
from .. import miscplot as misc
from seaborn import color_palette
class TestPalPlot(object):
"""Test the function that visualizes a color palette."""
def test_palplot_size(self):
pal4 = color_palette("husl", 4)
misc.palplot(pal4)
size4 = plt.gcf().get_size_inches()
nt.assert_equal(tuple(size4), (4, 1))
pal5 = color_palette("husl", 5)
misc.palplot(pal5)
size5 = plt.gcf().get_size_inches()
nt.assert_equal(tuple(size5), (5, 1))
palbig = color_palette("husl", 3)
misc.palplot(palbig, 2)
sizebig = plt.gcf().get_size_inches()
nt.assert_equal(tuple(sizebig), (6, 2))
plt.close("all")
|
<commit_before><commit_msg>Add simple test for palplot<commit_after>import nose.tools as nt
import numpy.testing as npt
import matplotlib.pyplot as plt
from .. import miscplot as misc
from seaborn import color_palette
class TestPalPlot(object):
"""Test the function that visualizes a color palette."""
def test_palplot_size(self):
pal4 = color_palette("husl", 4)
misc.palplot(pal4)
size4 = plt.gcf().get_size_inches()
nt.assert_equal(tuple(size4), (4, 1))
pal5 = color_palette("husl", 5)
misc.palplot(pal5)
size5 = plt.gcf().get_size_inches()
nt.assert_equal(tuple(size5), (5, 1))
palbig = color_palette("husl", 3)
misc.palplot(palbig, 2)
sizebig = plt.gcf().get_size_inches()
nt.assert_equal(tuple(sizebig), (6, 2))
plt.close("all")
|
|
a09bfa5ca64c52df68581849e1a96efe79dfc2ee
|
astropy/io/fits/tests/test_fitsdiff_openfile.py
|
astropy/io/fits/tests/test_fitsdiff_openfile.py
|
import pytest
from astropy.io import fits
import numpy as np
from pathlib import Path
def test_fitsdiff_openfile(tmpdir):
"""Make sure that failing FITSDiff doesn't leave open files"""
path1 = str(tmpdir.join("file1.fits"))
path2 = str(tmpdir.join("file2.fits"))
hdulist = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(data=np.zeros((10)))])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = fits.FITSDiff(path1, path2)
assert diff.identical, diff.report()
|
Add test showing --open-files error when FITSDiff raises AssertionError
|
Add test showing --open-files error when FITSDiff raises AssertionError
|
Python
|
bsd-3-clause
|
pllim/astropy,dhomeier/astropy,aleksandr-bakanov/astropy,aleksandr-bakanov/astropy,StuartLittlefair/astropy,saimn/astropy,larrybradley/astropy,mhvk/astropy,pllim/astropy,larrybradley/astropy,lpsinger/astropy,larrybradley/astropy,dhomeier/astropy,lpsinger/astropy,astropy/astropy,astropy/astropy,saimn/astropy,dhomeier/astropy,aleksandr-bakanov/astropy,mhvk/astropy,lpsinger/astropy,pllim/astropy,dhomeier/astropy,aleksandr-bakanov/astropy,StuartLittlefair/astropy,StuartLittlefair/astropy,pllim/astropy,lpsinger/astropy,saimn/astropy,astropy/astropy,saimn/astropy,astropy/astropy,larrybradley/astropy,mhvk/astropy,StuartLittlefair/astropy,lpsinger/astropy,dhomeier/astropy,mhvk/astropy,mhvk/astropy,saimn/astropy,astropy/astropy,StuartLittlefair/astropy,pllim/astropy,larrybradley/astropy
|
Add test showing --open-files error when FITSDiff raises AssertionError
|
import pytest
from astropy.io import fits
import numpy as np
from pathlib import Path
def test_fitsdiff_openfile(tmpdir):
"""Make sure that failing FITSDiff doesn't leave open files"""
path1 = str(tmpdir.join("file1.fits"))
path2 = str(tmpdir.join("file2.fits"))
hdulist = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(data=np.zeros((10)))])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = fits.FITSDiff(path1, path2)
assert diff.identical, diff.report()
|
<commit_before><commit_msg>Add test showing --open-files error when FITSDiff raises AssertionError<commit_after>
|
import pytest
from astropy.io import fits
import numpy as np
from pathlib import Path
def test_fitsdiff_openfile(tmpdir):
"""Make sure that failing FITSDiff doesn't leave open files"""
path1 = str(tmpdir.join("file1.fits"))
path2 = str(tmpdir.join("file2.fits"))
hdulist = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(data=np.zeros((10)))])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = fits.FITSDiff(path1, path2)
assert diff.identical, diff.report()
|
Add test showing --open-files error when FITSDiff raises AssertionErrorimport pytest
from astropy.io import fits
import numpy as np
from pathlib import Path
def test_fitsdiff_openfile(tmpdir):
"""Make sure that failing FITSDiff doesn't leave open files"""
path1 = str(tmpdir.join("file1.fits"))
path2 = str(tmpdir.join("file2.fits"))
hdulist = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(data=np.zeros((10)))])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = fits.FITSDiff(path1, path2)
assert diff.identical, diff.report()
|
<commit_before><commit_msg>Add test showing --open-files error when FITSDiff raises AssertionError<commit_after>import pytest
from astropy.io import fits
import numpy as np
from pathlib import Path
def test_fitsdiff_openfile(tmpdir):
"""Make sure that failing FITSDiff doesn't leave open files"""
path1 = str(tmpdir.join("file1.fits"))
path2 = str(tmpdir.join("file2.fits"))
hdulist = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(data=np.zeros((10)))])
hdulist.writeto(path1)
hdulist[1].data[0] = 1
hdulist.writeto(path2)
diff = fits.FITSDiff(path1, path2)
assert diff.identical, diff.report()
|
|
e1ea5c1c3f1279aca22341bd83b3f73acf50d332
|
DataWrangling/CaseStudy/tags.py
|
DataWrangling/CaseStudy/tags.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET
import pprint
import re
import os
"""
Your task is to explore the data a bit more.
Before you process the data and add it into your database, you should check the
"k" value for each "<tag>" and see if there are any potential problems.
We have provided you with 3 regular expressions to check for certain patterns
in the tags. As we saw in the quiz earlier, we would like to change the data
model and expand the "addr:street" type of keys to a dictionary like this:
{"address": {"street": "Some value"}}
So, we have to see if we have such tags, and if we have any tags with
problematic characters.
Please complete the function 'key_type', such that we have a count of each of
four tag categories in a dictionary:
"lower", for tags that contain only lowercase letters and are valid,
"lower_colon", for otherwise valid tags with a colon in their names,
"problemchars", for tags with problematic characters, and
"other", for other tags that do not fall into the other three categories.
See the 'process_map' and 'test' functions for examples of the expected format.
"""
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
def key_type(element, keys):
if element.tag == "tag":
# YOUR CODE HERE re.match(lower, element.attrib['k'], flags=0)
if re.search(lower, element.attrib['k']):
keys["lower"] += 1
elif re.search(lower_colon, element.attrib['k']):
keys["lower_colon"] += 1
elif re.search(problemchars, element.attrib['k']):
keys["problemchars"] += 1
else:
keys["other"] += 1
pass
return keys
def process_map(filename):
keys = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
for _, element in ET.iterparse(filename):
keys = key_type(element, keys)
return keys
def test():
# You can use another testfile 'map.osm' to look at your solution
# Note that the assertion below will be incorrect then.
# Note as well that the test function here is only used in the Test Run;
# when you submit, your code will be checked against a different dataset.
os.chdir('./data')
keys = process_map('example1.osm')
pprint.pprint(keys)
assert keys == {'lower': 5, 'lower_colon': 0, 'other': 1, 'problemchars': 1}
if __name__ == "__main__":
test()
|
Add a script which checks the <k> value for each <tag> and sees if there are any potential problems
|
feat: Add a script which checks the <k> value for each <tag> and sees if there are any potential problems
|
Python
|
mit
|
aguijarro/DataSciencePython
|
feat: Add a script which checks the <k> value for each <tag> and sees if there are any potential problems
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET
import pprint
import re
import os
"""
Your task is to explore the data a bit more.
Before you process the data and add it into your database, you should check the
"k" value for each "<tag>" and see if there are any potential problems.
We have provided you with 3 regular expressions to check for certain patterns
in the tags. As we saw in the quiz earlier, we would like to change the data
model and expand the "addr:street" type of keys to a dictionary like this:
{"address": {"street": "Some value"}}
So, we have to see if we have such tags, and if we have any tags with
problematic characters.
Please complete the function 'key_type', such that we have a count of each of
four tag categories in a dictionary:
"lower", for tags that contain only lowercase letters and are valid,
"lower_colon", for otherwise valid tags with a colon in their names,
"problemchars", for tags with problematic characters, and
"other", for other tags that do not fall into the other three categories.
See the 'process_map' and 'test' functions for examples of the expected format.
"""
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
def key_type(element, keys):
if element.tag == "tag":
# YOUR CODE HERE re.match(lower, element.attrib['k'], flags=0)
if re.search(lower, element.attrib['k']):
keys["lower"] += 1
elif re.search(lower_colon, element.attrib['k']):
keys["lower_colon"] += 1
elif re.search(problemchars, element.attrib['k']):
keys["problemchars"] += 1
else:
keys["other"] += 1
pass
return keys
def process_map(filename):
keys = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
for _, element in ET.iterparse(filename):
keys = key_type(element, keys)
return keys
def test():
# You can use another testfile 'map.osm' to look at your solution
# Note that the assertion below will be incorrect then.
# Note as well that the test function here is only used in the Test Run;
# when you submit, your code will be checked against a different dataset.
os.chdir('./data')
keys = process_map('example1.osm')
pprint.pprint(keys)
assert keys == {'lower': 5, 'lower_colon': 0, 'other': 1, 'problemchars': 1}
if __name__ == "__main__":
test()
|
<commit_before><commit_msg>feat: Add a script which checks the <k> value for each <tag> and sees if there are any potential problems<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET
import pprint
import re
import os
"""
Your task is to explore the data a bit more.
Before you process the data and add it into your database, you should check the
"k" value for each "<tag>" and see if there are any potential problems.
We have provided you with 3 regular expressions to check for certain patterns
in the tags. As we saw in the quiz earlier, we would like to change the data
model and expand the "addr:street" type of keys to a dictionary like this:
{"address": {"street": "Some value"}}
So, we have to see if we have such tags, and if we have any tags with
problematic characters.
Please complete the function 'key_type', such that we have a count of each of
four tag categories in a dictionary:
"lower", for tags that contain only lowercase letters and are valid,
"lower_colon", for otherwise valid tags with a colon in their names,
"problemchars", for tags with problematic characters, and
"other", for other tags that do not fall into the other three categories.
See the 'process_map' and 'test' functions for examples of the expected format.
"""
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
def key_type(element, keys):
if element.tag == "tag":
# YOUR CODE HERE re.match(lower, element.attrib['k'], flags=0)
if re.search(lower, element.attrib['k']):
keys["lower"] += 1
elif re.search(lower_colon, element.attrib['k']):
keys["lower_colon"] += 1
elif re.search(problemchars, element.attrib['k']):
keys["problemchars"] += 1
else:
keys["other"] += 1
pass
return keys
def process_map(filename):
keys = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
for _, element in ET.iterparse(filename):
keys = key_type(element, keys)
return keys
def test():
# You can use another testfile 'map.osm' to look at your solution
# Note that the assertion below will be incorrect then.
# Note as well that the test function here is only used in the Test Run;
# when you submit, your code will be checked against a different dataset.
os.chdir('./data')
keys = process_map('example1.osm')
pprint.pprint(keys)
assert keys == {'lower': 5, 'lower_colon': 0, 'other': 1, 'problemchars': 1}
if __name__ == "__main__":
test()
|
feat: Add a script which checks the <k> value for each <tag> and sees if there are any potential problems#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET
import pprint
import re
import os
"""
Your task is to explore the data a bit more.
Before you process the data and add it into your database, you should check the
"k" value for each "<tag>" and see if there are any potential problems.
We have provided you with 3 regular expressions to check for certain patterns
in the tags. As we saw in the quiz earlier, we would like to change the data
model and expand the "addr:street" type of keys to a dictionary like this:
{"address": {"street": "Some value"}}
So, we have to see if we have such tags, and if we have any tags with
problematic characters.
Please complete the function 'key_type', such that we have a count of each of
four tag categories in a dictionary:
"lower", for tags that contain only lowercase letters and are valid,
"lower_colon", for otherwise valid tags with a colon in their names,
"problemchars", for tags with problematic characters, and
"other", for other tags that do not fall into the other three categories.
See the 'process_map' and 'test' functions for examples of the expected format.
"""
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
def key_type(element, keys):
if element.tag == "tag":
# YOUR CODE HERE re.match(lower, element.attrib['k'], flags=0)
if re.search(lower, element.attrib['k']):
keys["lower"] += 1
elif re.search(lower_colon, element.attrib['k']):
keys["lower_colon"] += 1
elif re.search(problemchars, element.attrib['k']):
keys["problemchars"] += 1
else:
keys["other"] += 1
pass
return keys
def process_map(filename):
keys = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
for _, element in ET.iterparse(filename):
keys = key_type(element, keys)
return keys
def test():
# You can use another testfile 'map.osm' to look at your solution
# Note that the assertion below will be incorrect then.
# Note as well that the test function here is only used in the Test Run;
# when you submit, your code will be checked against a different dataset.
os.chdir('./data')
keys = process_map('example1.osm')
pprint.pprint(keys)
assert keys == {'lower': 5, 'lower_colon': 0, 'other': 1, 'problemchars': 1}
if __name__ == "__main__":
test()
|
<commit_before><commit_msg>feat: Add a script which checks the <k> value for each <tag> and sees if there are any potential problems<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET
import pprint
import re
import os
"""
Your task is to explore the data a bit more.
Before you process the data and add it into your database, you should check the
"k" value for each "<tag>" and see if there are any potential problems.
We have provided you with 3 regular expressions to check for certain patterns
in the tags. As we saw in the quiz earlier, we would like to change the data
model and expand the "addr:street" type of keys to a dictionary like this:
{"address": {"street": "Some value"}}
So, we have to see if we have such tags, and if we have any tags with
problematic characters.
Please complete the function 'key_type', such that we have a count of each of
four tag categories in a dictionary:
"lower", for tags that contain only lowercase letters and are valid,
"lower_colon", for otherwise valid tags with a colon in their names,
"problemchars", for tags with problematic characters, and
"other", for other tags that do not fall into the other three categories.
See the 'process_map' and 'test' functions for examples of the expected format.
"""
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
def key_type(element, keys):
if element.tag == "tag":
# YOUR CODE HERE re.match(lower, element.attrib['k'], flags=0)
if re.search(lower, element.attrib['k']):
keys["lower"] += 1
elif re.search(lower_colon, element.attrib['k']):
keys["lower_colon"] += 1
elif re.search(problemchars, element.attrib['k']):
keys["problemchars"] += 1
else:
keys["other"] += 1
pass
return keys
def process_map(filename):
keys = {"lower": 0, "lower_colon": 0, "problemchars": 0, "other": 0}
for _, element in ET.iterparse(filename):
keys = key_type(element, keys)
return keys
def test():
# You can use another testfile 'map.osm' to look at your solution
# Note that the assertion below will be incorrect then.
# Note as well that the test function here is only used in the Test Run;
# when you submit, your code will be checked against a different dataset.
os.chdir('./data')
keys = process_map('example1.osm')
pprint.pprint(keys)
assert keys == {'lower': 5, 'lower_colon': 0, 'other': 1, 'problemchars': 1}
if __name__ == "__main__":
test()
|
|
b3fbc81bf4c00d23042cebc34503c6cf6937db22
|
test/dunyatest.py
|
test/dunyatest.py
|
import unittest
from compmusic.dunya.conn import _make_url
class DunyaTest(unittest.TestCase):
def test_unicode(self):
params = {"first": "%^grt"}
url = _make_url("path", **params)
self.assertEqual(url, 'http://dunya.compmusic.upf.edu/path?first=%25%5Egrt')
|
Add test for conn._make_url. Test if url is encoded properly
|
Add test for conn._make_url. Test if url is encoded properly
|
Python
|
agpl-3.0
|
MTG/pycompmusic
|
Add test for conn._make_url. Test if url is encoded properly
|
import unittest
from compmusic.dunya.conn import _make_url
class DunyaTest(unittest.TestCase):
def test_unicode(self):
params = {"first": "%^grt"}
url = _make_url("path", **params)
self.assertEqual(url, 'http://dunya.compmusic.upf.edu/path?first=%25%5Egrt')
|
<commit_before><commit_msg>Add test for conn._make_url. Test if url is encoded properly<commit_after>
|
import unittest
from compmusic.dunya.conn import _make_url
class DunyaTest(unittest.TestCase):
def test_unicode(self):
params = {"first": "%^grt"}
url = _make_url("path", **params)
self.assertEqual(url, 'http://dunya.compmusic.upf.edu/path?first=%25%5Egrt')
|
Add test for conn._make_url. Test if url is encoded properlyimport unittest
from compmusic.dunya.conn import _make_url
class DunyaTest(unittest.TestCase):
def test_unicode(self):
params = {"first": "%^grt"}
url = _make_url("path", **params)
self.assertEqual(url, 'http://dunya.compmusic.upf.edu/path?first=%25%5Egrt')
|
<commit_before><commit_msg>Add test for conn._make_url. Test if url is encoded properly<commit_after>import unittest
from compmusic.dunya.conn import _make_url
class DunyaTest(unittest.TestCase):
def test_unicode(self):
params = {"first": "%^grt"}
url = _make_url("path", **params)
self.assertEqual(url, 'http://dunya.compmusic.upf.edu/path?first=%25%5Egrt')
|
|
749ab21acc35bd93eb402dc95cc6e8729165a4b8
|
elections/2008/shapes/coords.py
|
elections/2008/shapes/coords.py
|
#!/usr/bin/env python
import math
def geoToPixel( point, zoom, tilesize=256 ):
lng = point[0]
if lng > 180.0: lng -= 360.0
lng = lng / 360.0 + 0.5
lat = point[1]
lat = 0.5 - ( math.log( math.tan( ( math.pi / 4.0 ) + ( lat * math.pi / 360.0 ) ) ) / math.pi / 2.0 );
scale = ( 1 << zoom ) * tilesize
return [ int( lng * scale ), int( lat * scale ) ]
print geoToPixel( [ 0.0, 0.0 ], 0 ) == [ 128, 128 ]
print geoToPixel( [ 0.0, 0.0 ], 1 ) == [ 256, 256 ]
print geoToPixel( [ -60.0, 45.0 ], 0 ) == [ 85, 92 ]
print geoToPixel( [ -60.0, 45.0 ], 1 ) == [ 170, 184 ]
|
Add geographic coordinate to pixel coordinate converter
|
Add geographic coordinate to pixel coordinate converter
|
Python
|
apache-2.0
|
cureHsu/js-v2-samples,cureHsu/js-v2-samples,feeilk1991/promenad,feeilk1991/promenad,stephenmcd/js-v2-samples,feeilk1991/promenad,googlearchive/js-v2-samples,bawg/js-v2-samples,alexander0205/js-v2-samples,googlearchive/js-v2-samples,cureHsu/js-v2-samples,googlearchive/js-v2-samples,cureHsu/js-v2-samples,feeilk1991/promenad,googlearchive/js-v2-samples,alexander0205/js-v2-samples,bawg/js-v2-samples,alexander0205/js-v2-samples,stephenmcd/js-v2-samples,cureHsu/js-v2-samples,bawg/js-v2-samples,googlearchive/js-v2-samples,feeilk1991/promenad,feeilk1991/promenad,cureHsu/js-v2-samples,bawg/js-v2-samples,googlearchive/js-v2-samples,googlearchive/js-v2-samples,stephenmcd/js-v2-samples,stephenmcd/js-v2-samples,bawg/js-v2-samples,bawg/js-v2-samples,cureHsu/js-v2-samples,feeilk1991/promenad,alexander0205/js-v2-samples,stephenmcd/js-v2-samples,alexander0205/js-v2-samples,alexander0205/js-v2-samples,alexander0205/js-v2-samples,googlearchive/js-v2-samples,stephenmcd/js-v2-samples,bawg/js-v2-samples,feeilk1991/promenad,bawg/js-v2-samples,stephenmcd/js-v2-samples,stephenmcd/js-v2-samples,alexander0205/js-v2-samples,cureHsu/js-v2-samples
|
Add geographic coordinate to pixel coordinate converter
|
#!/usr/bin/env python
import math
def geoToPixel( point, zoom, tilesize=256 ):
lng = point[0]
if lng > 180.0: lng -= 360.0
lng = lng / 360.0 + 0.5
lat = point[1]
lat = 0.5 - ( math.log( math.tan( ( math.pi / 4.0 ) + ( lat * math.pi / 360.0 ) ) ) / math.pi / 2.0 );
scale = ( 1 << zoom ) * tilesize
return [ int( lng * scale ), int( lat * scale ) ]
print geoToPixel( [ 0.0, 0.0 ], 0 ) == [ 128, 128 ]
print geoToPixel( [ 0.0, 0.0 ], 1 ) == [ 256, 256 ]
print geoToPixel( [ -60.0, 45.0 ], 0 ) == [ 85, 92 ]
print geoToPixel( [ -60.0, 45.0 ], 1 ) == [ 170, 184 ]
|
<commit_before><commit_msg>Add geographic coordinate to pixel coordinate converter<commit_after>
|
#!/usr/bin/env python
import math
def geoToPixel( point, zoom, tilesize=256 ):
lng = point[0]
if lng > 180.0: lng -= 360.0
lng = lng / 360.0 + 0.5
lat = point[1]
lat = 0.5 - ( math.log( math.tan( ( math.pi / 4.0 ) + ( lat * math.pi / 360.0 ) ) ) / math.pi / 2.0 );
scale = ( 1 << zoom ) * tilesize
return [ int( lng * scale ), int( lat * scale ) ]
print geoToPixel( [ 0.0, 0.0 ], 0 ) == [ 128, 128 ]
print geoToPixel( [ 0.0, 0.0 ], 1 ) == [ 256, 256 ]
print geoToPixel( [ -60.0, 45.0 ], 0 ) == [ 85, 92 ]
print geoToPixel( [ -60.0, 45.0 ], 1 ) == [ 170, 184 ]
|
Add geographic coordinate to pixel coordinate converter#!/usr/bin/env python
import math
def geoToPixel( point, zoom, tilesize=256 ):
lng = point[0]
if lng > 180.0: lng -= 360.0
lng = lng / 360.0 + 0.5
lat = point[1]
lat = 0.5 - ( math.log( math.tan( ( math.pi / 4.0 ) + ( lat * math.pi / 360.0 ) ) ) / math.pi / 2.0 );
scale = ( 1 << zoom ) * tilesize
return [ int( lng * scale ), int( lat * scale ) ]
print geoToPixel( [ 0.0, 0.0 ], 0 ) == [ 128, 128 ]
print geoToPixel( [ 0.0, 0.0 ], 1 ) == [ 256, 256 ]
print geoToPixel( [ -60.0, 45.0 ], 0 ) == [ 85, 92 ]
print geoToPixel( [ -60.0, 45.0 ], 1 ) == [ 170, 184 ]
|
<commit_before><commit_msg>Add geographic coordinate to pixel coordinate converter<commit_after>#!/usr/bin/env python
import math
def geoToPixel( point, zoom, tilesize=256 ):
lng = point[0]
if lng > 180.0: lng -= 360.0
lng = lng / 360.0 + 0.5
lat = point[1]
lat = 0.5 - ( math.log( math.tan( ( math.pi / 4.0 ) + ( lat * math.pi / 360.0 ) ) ) / math.pi / 2.0 );
scale = ( 1 << zoom ) * tilesize
return [ int( lng * scale ), int( lat * scale ) ]
print geoToPixel( [ 0.0, 0.0 ], 0 ) == [ 128, 128 ]
print geoToPixel( [ 0.0, 0.0 ], 1 ) == [ 256, 256 ]
print geoToPixel( [ -60.0, 45.0 ], 0 ) == [ 85, 92 ]
print geoToPixel( [ -60.0, 45.0 ], 1 ) == [ 170, 184 ]
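The arithmetic above is the standard Web Mercator mapping: longitude scales linearly into [0, 1], while latitude passes through log(tan(pi/4 + lat/2)) before being scaled by the tile grid. As an illustrative check only (not part of the original file), the same math written for Python 3 reproduces the expected pixel values from the prints above:

import math

def geo_to_pixel(lng, lat, zoom, tilesize=256):
    # Web Mercator: x is linear in longitude, y uses log(tan(pi/4 + lat*pi/360))
    if lng > 180.0:
        lng -= 360.0
    x = lng / 360.0 + 0.5
    y = 0.5 - math.log(math.tan(math.pi / 4.0 + lat * math.pi / 360.0)) / math.pi / 2.0
    scale = (1 << zoom) * tilesize
    return [int(x * scale), int(y * scale)]

assert geo_to_pixel(0.0, 0.0, 0) == [128, 128]
assert geo_to_pixel(-60.0, 45.0, 0) == [85, 92]
assert geo_to_pixel(-60.0, 45.0, 1) == [170, 184]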
|
|
cff64915aaaee0aff3cec7b918cd4a008b327912
|
chipy_org/urls.py
|
chipy_org/urls.py
|
from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from contact.views import ChipyContactView
admin.autodiscover()
urlpatterns = patterns("",
url(r'', include('main.urls')),
url(r'', include('social_auth.urls')),
url(r'^login', direct_to_template, {
'template': 'login.html'
}),
(r'^grappelli/', include('grappelli.urls')),
url(r'^meetings/', include('meetings.urls')),
url(r'^profiles/', include('profiles.urls', namespace="profiles")),
url(r'^admin/', include(admin.site.urls)),
url(r'^about/', include('about.urls')),
url(r'^logout', 'django.contrib.auth.views.logout',
{'next_page': '/'}),
url(r'^contact/', ChipyContactView.as_view(), name="contact"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^pages/', include('django.contrib.flatpages.urls')),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns("",
url(r'^site_media/media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
urlpatterns += staticfiles_urlpatterns()
|
from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from contact.views import ChipyContactView
admin.autodiscover()
urlpatterns = patterns("",
url(r'', include('main.urls')),
url(r'', include('social_auth.urls')),
url(r'^login/{0,1}$', direct_to_template, {
'template': 'login.html'
}),
(r'^grappelli/', include('grappelli.urls')),
url(r'^meetings/', include('meetings.urls')),
url(r'^profiles/', include('profiles.urls', namespace="profiles")),
url(r'^admin/', include(admin.site.urls)),
url(r'^about/', include('about.urls')),
url(r'^logout', 'django.contrib.auth.views.logout',
{'next_page': '/'}),
url(r'^contact/', ChipyContactView.as_view(), name="contact"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^pages/', include('django.contrib.flatpages.urls')),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns("",
url(r'^site_media/media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
urlpatterns += staticfiles_urlpatterns()
|
Set for zero or one slash
|
Set for zero or one slash
|
Python
|
mit
|
bharathelangovan/chipy.org,agfor/chipy.org,brianray/chipy.org,brianray/chipy.org,agfor/chipy.org,chicagopython/chipy.org,tanyaschlusser/chipy.org,tanyaschlusser/chipy.org,bharathelangovan/chipy.org,agfor/chipy.org,chicagopython/chipy.org,tanyaschlusser/chipy.org,brianray/chipy.org,chicagopython/chipy.org,bharathelangovan/chipy.org,chicagopython/chipy.org
|
from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from contact.views import ChipyContactView
admin.autodiscover()
urlpatterns = patterns("",
url(r'', include('main.urls')),
url(r'', include('social_auth.urls')),
url(r'^login', direct_to_template, {
'template': 'login.html'
}),
(r'^grappelli/', include('grappelli.urls')),
url(r'^meetings/', include('meetings.urls')),
url(r'^profiles/', include('profiles.urls', namespace="profiles")),
url(r'^admin/', include(admin.site.urls)),
url(r'^about/', include('about.urls')),
url(r'^logout', 'django.contrib.auth.views.logout',
{'next_page': '/'}),
url(r'^contact/', ChipyContactView.as_view(), name="contact"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^pages/', include('django.contrib.flatpages.urls')),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns("",
url(r'^site_media/media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
urlpatterns += staticfiles_urlpatterns()
Set for zero or one slash
|
from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from contact.views import ChipyContactView
admin.autodiscover()
urlpatterns = patterns("",
url(r'', include('main.urls')),
url(r'', include('social_auth.urls')),
url(r'^login/{0,1}$', direct_to_template, {
'template': 'login.html'
}),
(r'^grappelli/', include('grappelli.urls')),
url(r'^meetings/', include('meetings.urls')),
url(r'^profiles/', include('profiles.urls', namespace="profiles")),
url(r'^admin/', include(admin.site.urls)),
url(r'^about/', include('about.urls')),
url(r'^logout', 'django.contrib.auth.views.logout',
{'next_page': '/'}),
url(r'^contact/', ChipyContactView.as_view(), name="contact"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^pages/', include('django.contrib.flatpages.urls')),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns("",
url(r'^site_media/media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
urlpatterns += staticfiles_urlpatterns()
|
<commit_before>from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from contact.views import ChipyContactView
admin.autodiscover()
urlpatterns = patterns("",
url(r'', include('main.urls')),
url(r'', include('social_auth.urls')),
url(r'^login', direct_to_template, {
'template': 'login.html'
}),
(r'^grappelli/', include('grappelli.urls')),
url(r'^meetings/', include('meetings.urls')),
url(r'^profiles/', include('profiles.urls', namespace="profiles")),
url(r'^admin/', include(admin.site.urls)),
url(r'^about/', include('about.urls')),
url(r'^logout', 'django.contrib.auth.views.logout',
{'next_page': '/'}),
url(r'^contact/', ChipyContactView.as_view(), name="contact"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^pages/', include('django.contrib.flatpages.urls')),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns("",
url(r'^site_media/media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
urlpatterns += staticfiles_urlpatterns()
<commit_msg>Set for zero or one slash<commit_after>
|
from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from contact.views import ChipyContactView
admin.autodiscover()
urlpatterns = patterns("",
url(r'', include('main.urls')),
url(r'', include('social_auth.urls')),
url(r'^login/{0,1}$', direct_to_template, {
'template': 'login.html'
}),
(r'^grappelli/', include('grappelli.urls')),
url(r'^meetings/', include('meetings.urls')),
url(r'^profiles/', include('profiles.urls', namespace="profiles")),
url(r'^admin/', include(admin.site.urls)),
url(r'^about/', include('about.urls')),
url(r'^logout', 'django.contrib.auth.views.logout',
{'next_page': '/'}),
url(r'^contact/', ChipyContactView.as_view(), name="contact"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^pages/', include('django.contrib.flatpages.urls')),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns("",
url(r'^site_media/media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
urlpatterns += staticfiles_urlpatterns()
|
from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from contact.views import ChipyContactView
admin.autodiscover()
urlpatterns = patterns("",
url(r'', include('main.urls')),
url(r'', include('social_auth.urls')),
url(r'^login', direct_to_template, {
'template': 'login.html'
}),
(r'^grappelli/', include('grappelli.urls')),
url(r'^meetings/', include('meetings.urls')),
url(r'^profiles/', include('profiles.urls', namespace="profiles")),
url(r'^admin/', include(admin.site.urls)),
url(r'^about/', include('about.urls')),
url(r'^logout', 'django.contrib.auth.views.logout',
{'next_page': '/'}),
url(r'^contact/', ChipyContactView.as_view(), name="contact"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^pages/', include('django.contrib.flatpages.urls')),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns("",
url(r'^site_media/media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
urlpatterns += staticfiles_urlpatterns()
Set for zero or one slashfrom django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from contact.views import ChipyContactView
admin.autodiscover()
urlpatterns = patterns("",
url(r'', include('main.urls')),
url(r'', include('social_auth.urls')),
url(r'^login/{0,1}$', direct_to_template, {
'template': 'login.html'
}),
(r'^grappelli/', include('grappelli.urls')),
url(r'^meetings/', include('meetings.urls')),
url(r'^profiles/', include('profiles.urls', namespace="profiles")),
url(r'^admin/', include(admin.site.urls)),
url(r'^about/', include('about.urls')),
url(r'^logout', 'django.contrib.auth.views.logout',
{'next_page': '/'}),
url(r'^contact/', ChipyContactView.as_view(), name="contact"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^pages/', include('django.contrib.flatpages.urls')),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns("",
url(r'^site_media/media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
urlpatterns += staticfiles_urlpatterns()
|
<commit_before>from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from contact.views import ChipyContactView
admin.autodiscover()
urlpatterns = patterns("",
url(r'', include('main.urls')),
url(r'', include('social_auth.urls')),
url(r'^login', direct_to_template, {
'template': 'login.html'
}),
(r'^grappelli/', include('grappelli.urls')),
url(r'^meetings/', include('meetings.urls')),
url(r'^profiles/', include('profiles.urls', namespace="profiles")),
url(r'^admin/', include(admin.site.urls)),
url(r'^about/', include('about.urls')),
url(r'^logout', 'django.contrib.auth.views.logout',
{'next_page': '/'}),
url(r'^contact/', ChipyContactView.as_view(), name="contact"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^pages/', include('django.contrib.flatpages.urls')),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns("",
url(r'^site_media/media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
urlpatterns += staticfiles_urlpatterns()
<commit_msg>Set for zero or one slash<commit_after>from django.conf import settings
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
from contact.views import ChipyContactView
admin.autodiscover()
urlpatterns = patterns("",
url(r'', include('main.urls')),
url(r'', include('social_auth.urls')),
url(r'^login/{0,1}$', direct_to_template, {
'template': 'login.html'
}),
(r'^grappelli/', include('grappelli.urls')),
url(r'^meetings/', include('meetings.urls')),
url(r'^profiles/', include('profiles.urls', namespace="profiles")),
url(r'^admin/', include(admin.site.urls)),
url(r'^about/', include('about.urls')),
url(r'^logout', 'django.contrib.auth.views.logout',
{'next_page': '/'}),
url(r'^contact/', ChipyContactView.as_view(), name="contact"),
url(r'^tinymce/', include('tinymce.urls')),
url(r'^pages/', include('django.contrib.flatpages.urls')),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns("",
url(r'^site_media/media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
urlpatterns += staticfiles_urlpatterns()
|
60353a2ee3c54b68d83c6e5b55ef298388f81a5c
|
mcdowell/src/main/python/ch1/arrays.py
|
mcdowell/src/main/python/ch1/arrays.py
|
def unique(string):
counter = {}
for c in string:
if c in counter:
return False
else:
counter[c] = 1
else:
return True
def reverse(string):
result = []
for i in range(len(string)):
result.append(string[-(i+1)])
return "".join(result)
def is_permutation(str1, str2):
if len(str1) != len(str2):
return False
counter = {}
for i in range(len(str1)):
if str1[i] in counter:
counter[str1[i]] += 1
else:
counter[str1[i]] = 1
if str2[i] in counter:
counter[str2[i]] -= 1
else:
counter[str2[i]] = -1
for k in counter:
if counter[k] != 0:
return False
else:
return True
def replace_spaces(array, end):
rshift = len(array) - end - 1
for i in range(end, -1, -1):
if array[i] == " ":
array[i+rshift-2:i+rshift+1] = ["%", "2", "0"]
rshift -= 2
else:
array[i+rshift] = array[i]
return "".join(array)
|
Rename module and add replace_spaces.
|
Rename module and add replace_spaces.
|
Python
|
mit
|
jamesewoo/tigeruppercut,jamesewoo/tigeruppercut
|
Rename module and add replace_spaces.
|
def unique(string):
counter = {}
for c in string:
if c in counter:
return False
else:
counter[c] = 1
else:
return True
def reverse(string):
result = []
for i in range(len(string)):
result.append(string[-(i+1)])
return "".join(result)
def is_permutation(str1, str2):
if len(str1) != len(str2):
return False
counter = {}
for i in range(len(str1)):
if str1[i] in counter:
counter[str1[i]] += 1
else:
counter[str1[i]] = 1
if str2[i] in counter:
counter[str2[i]] -= 1
else:
counter[str2[i]] = -1
for k in counter:
if counter[k] != 0:
return False
else:
return True
def replace_spaces(array, end):
rshift = len(array) - end - 1
for i in range(end, -1, -1):
if array[i] == " ":
array[i+rshift-2:i+rshift+1] = ["%", "2", "0"]
rshift -= 2
else:
array[i+rshift] = array[i]
return "".join(array)
|
<commit_before><commit_msg>Rename module and add replace_spaces.<commit_after>
|
def unique(string):
counter = {}
for c in string:
if c in counter:
return False
else:
counter[c] = 1
else:
return True
def reverse(string):
result = []
for i in range(len(string)):
result.append(string[-(i+1)])
return "".join(result)
def is_permutation(str1, str2):
if len(str1) != len(str2):
return False
counter = {}
for i in range(len(str1)):
if str1[i] in counter:
counter[str1[i]] += 1
else:
counter[str1[i]] = 1
if str2[i] in counter:
counter[str2[i]] -= 1
else:
counter[str2[i]] = -1
for k in counter:
if counter[k] != 0:
return False
else:
return True
def replace_spaces(array, end):
rshift = len(array) - end - 1
for i in range(end, -1, -1):
if array[i] == " ":
array[i+rshift-2:i+rshift+1] = ["%", "2", "0"]
rshift -= 2
else:
array[i+rshift] = array[i]
return "".join(array)
|
Rename module and add replace_spaces.def unique(string):
counter = {}
for c in string:
if c in counter:
return False
else:
counter[c] = 1
else:
return True
def reverse(string):
result = []
for i in range(len(string)):
result.append(string[-(i+1)])
return "".join(result)
def is_permutation(str1, str2):
if len(str1) != len(str2):
return False
counter = {}
for i in range(len(str1)):
if str1[i] in counter:
counter[str1[i]] += 1
else:
counter[str1[i]] = 1
if str2[i] in counter:
counter[str2[i]] -= 1
else:
counter[str2[i]] = -1
for k in counter:
if counter[k] != 0:
return False
else:
return True
def replace_spaces(array, end):
rshift = len(array) - end - 1
for i in range(end, -1, -1):
if array[i] == " ":
array[i+rshift-2:i+rshift+1] = ["%", "2", "0"]
rshift -= 2
else:
array[i+rshift] = array[i]
return "".join(array)
|
<commit_before><commit_msg>Rename module and add replace_spaces.<commit_after>def unique(string):
counter = {}
for c in string:
if c in counter:
return False
else:
counter[c] = 1
else:
return True
def reverse(string):
result = []
for i in range(len(string)):
result.append(string[-(i+1)])
return "".join(result)
def is_permutation(str1, str2):
if len(str1) != len(str2):
return False
counter = {}
for i in range(len(str1)):
if str1[i] in counter:
counter[str1[i]] += 1
else:
counter[str1[i]] = 1
if str2[i] in counter:
counter[str2[i]] -= 1
else:
counter[str2[i]] = -1
for k in counter:
if counter[k] != 0:
return False
else:
return True
def replace_spaces(array, end):
rshift = len(array) - end - 1
for i in range(end, -1, -1):
if array[i] == " ":
array[i+rshift-2:i+rshift+1] = ["%", "2", "0"]
rshift -= 2
else:
array[i+rshift] = array[i]
return "".join(array)
|
|
f406a955784adf14583c3855e175fddeffc94250
|
fixlib/couch.py
|
fixlib/couch.py
|
import fix42
import couchdb
import copy
class Store(object):
def __init__(self, *args):
self.db = couchdb.Server(args[0])[args[1]]
self._last = None
@property
def last(self):
if self._last is not None:
return self._last
cur = self.db.view('seq/in', descending=True, limit=1)
inc = cur.rows[0].key if cur.rows else 0
cur = self.db.view('seq/out', descending=True, limit=1)
out = cur.rows[0].key if cur.rows else 0
self._last = [inc, out]
return self._last
def _encode(self, msg):
msg = copy.copy(msg)
for k, v in msg.iteritems():
if fix42.nojson(k):
msg[k] = str(v)
return msg
def get(self, dir, seq):
return self.db.get('%s-%s' % (dir, seq))
def save(self, dir, msg):
msg = self._encode(msg)
msg['_id'] = '%s-%s' % (dir, msg['MsgSeqNum'])
lkey = {'in': 0, 'out': 1}[dir]
if self._last[lkey] < msg['MsgSeqNum']:
self._last[lkey] = msg['MsgSeqNum']
if msg['_id'] not in self.db:
self.db.update([msg])
|
Move CouchDB-based Store into separate module.
|
Move CouchDB-based Store into separate module.
|
Python
|
bsd-3-clause
|
djc/fixlib,jvirtanen/fixlib
|
Move CouchDB-based Store into separate module.
|
import fix42
import couchdb
import copy
class Store(object):
def __init__(self, *args):
self.db = couchdb.Server(args[0])[args[1]]
self._last = None
@property
def last(self):
if self._last is not None:
return self._last
cur = self.db.view('seq/in', descending=True, limit=1)
inc = cur.rows[0].key if cur.rows else 0
cur = self.db.view('seq/out', descending=True, limit=1)
out = cur.rows[0].key if cur.rows else 0
self._last = [inc, out]
return self._last
def _encode(self, msg):
msg = copy.copy(msg)
for k, v in msg.iteritems():
if fix42.nojson(k):
msg[k] = str(v)
return msg
def get(self, dir, seq):
return self.db.get('%s-%s' % (dir, seq))
def save(self, dir, msg):
msg = self._encode(msg)
msg['_id'] = '%s-%s' % (dir, msg['MsgSeqNum'])
lkey = {'in': 0, 'out': 1}[dir]
if self._last[lkey] < msg['MsgSeqNum']:
self._last[lkey] = msg['MsgSeqNum']
if msg['_id'] not in self.db:
self.db.update([msg])
|
<commit_before><commit_msg>Move CouchDB-based Store into separate module.<commit_after>
|
import fix42
import couchdb
import copy
class Store(object):
def __init__(self, *args):
self.db = couchdb.Server(args[0])[args[1]]
self._last = None
@property
def last(self):
if self._last is not None:
return self._last
cur = self.db.view('seq/in', descending=True, limit=1)
inc = cur.rows[0].key if cur.rows else 0
cur = self.db.view('seq/out', descending=True, limit=1)
out = cur.rows[0].key if cur.rows else 0
self._last = [inc, out]
return self._last
def _encode(self, msg):
msg = copy.copy(msg)
for k, v in msg.iteritems():
if fix42.nojson(k):
msg[k] = str(v)
return msg
def get(self, dir, seq):
return self.db.get('%s-%s' % (dir, seq))
def save(self, dir, msg):
msg = self._encode(msg)
msg['_id'] = '%s-%s' % (dir, msg['MsgSeqNum'])
lkey = {'in': 0, 'out': 1}[dir]
if self._last[lkey] < msg['MsgSeqNum']:
self._last[lkey] = msg['MsgSeqNum']
if msg['_id'] not in self.db:
self.db.update([msg])
|
Move CouchDB-based Store into separate module.import fix42
import couchdb
import copy
class Store(object):
def __init__(self, *args):
self.db = couchdb.Server(args[0])[args[1]]
self._last = None
@property
def last(self):
if self._last is not None:
return self._last
cur = self.db.view('seq/in', descending=True, limit=1)
inc = cur.rows[0].key if cur.rows else 0
cur = self.db.view('seq/out', descending=True, limit=1)
out = cur.rows[0].key if cur.rows else 0
self._last = [inc, out]
return self._last
def _encode(self, msg):
msg = copy.copy(msg)
for k, v in msg.iteritems():
if fix42.nojson(k):
msg[k] = str(v)
return msg
def get(self, dir, seq):
return self.db.get('%s-%s' % (dir, seq))
def save(self, dir, msg):
msg = self._encode(msg)
msg['_id'] = '%s-%s' % (dir, msg['MsgSeqNum'])
lkey = {'in': 0, 'out': 1}[dir]
if self._last[lkey] < msg['MsgSeqNum']:
self._last[lkey] = msg['MsgSeqNum']
if msg['_id'] not in self.db:
self.db.update([msg])
|
<commit_before><commit_msg>Move CouchDB-based Store into separate module.<commit_after>import fix42
import couchdb
import copy
class Store(object):
def __init__(self, *args):
self.db = couchdb.Server(args[0])[args[1]]
self._last = None
@property
def last(self):
if self._last is not None:
return self._last
cur = self.db.view('seq/in', descending=True, limit=1)
inc = cur.rows[0].key if cur.rows else 0
cur = self.db.view('seq/out', descending=True, limit=1)
out = cur.rows[0].key if cur.rows else 0
self._last = [inc, out]
return self._last
def _encode(self, msg):
msg = copy.copy(msg)
for k, v in msg.iteritems():
if fix42.nojson(k):
msg[k] = str(v)
return msg
def get(self, dir, seq):
return self.db.get('%s-%s' % (dir, seq))
def save(self, dir, msg):
msg = self._encode(msg)
msg['_id'] = '%s-%s' % (dir, msg['MsgSeqNum'])
lkey = {'in': 0, 'out': 1}[dir]
if self._last[lkey] < msg['MsgSeqNum']:
self._last[lkey] = msg['MsgSeqNum']
if msg['_id'] not in self.db:
self.db.update([msg])
|
|
e2b86299738a726b5bec0a2441426ed4651d9a26
|
dmoj/executors/JAVA10.py
|
dmoj/executors/JAVA10.py
|
from dmoj.executors.java_executor import JavacExecutor
class Executor(JavacExecutor):
compiler = 'javac10'
vm = 'java10'
name = 'JAVA10'
jvm_regex = r'java-10-|openjdk10'
test_program = '''\
import java.io.IOException;
interface IORunnable {
public void run() throws IOException;
}
public class self_test {
public static void run(IORunnable target) throws IOException {
target.run();
}
public static void main(String[] args) throws IOException {
run(() -> {
var buffer = new byte[4096];
int read;
while ((read = System.in.read(buffer)) >= 0)
System.out.write(buffer, 0, read);
});
}
}'''
def get_compile_args(self):
return [self.get_compiler(), '-encoding', 'UTF-8', self._code]
|
Add Java 10 (EAP) executor support
|
Add Java 10 (EAP) executor support
|
Python
|
agpl-3.0
|
DMOJ/judge,DMOJ/judge,DMOJ/judge
|
Add Java 10 (EAP) executor support
|
from dmoj.executors.java_executor import JavacExecutor
class Executor(JavacExecutor):
compiler = 'javac10'
vm = 'java10'
name = 'JAVA10'
jvm_regex = r'java-10-|openjdk10'
test_program = '''\
import java.io.IOException;
interface IORunnable {
public void run() throws IOException;
}
public class self_test {
public static void run(IORunnable target) throws IOException {
target.run();
}
public static void main(String[] args) throws IOException {
run(() -> {
var buffer = new byte[4096];
int read;
while ((read = System.in.read(buffer)) >= 0)
System.out.write(buffer, 0, read);
});
}
}'''
def get_compile_args(self):
return [self.get_compiler(), '-encoding', 'UTF-8', self._code]
|
<commit_before><commit_msg>Add Java 10 (EAP) executor support<commit_after>
|
from dmoj.executors.java_executor import JavacExecutor
class Executor(JavacExecutor):
compiler = 'javac10'
vm = 'java10'
name = 'JAVA10'
jvm_regex = r'java-10-|openjdk10'
test_program = '''\
import java.io.IOException;
interface IORunnable {
public void run() throws IOException;
}
public class self_test {
public static void run(IORunnable target) throws IOException {
target.run();
}
public static void main(String[] args) throws IOException {
run(() -> {
var buffer = new byte[4096];
int read;
while ((read = System.in.read(buffer)) >= 0)
System.out.write(buffer, 0, read);
});
}
}'''
def get_compile_args(self):
return [self.get_compiler(), '-encoding', 'UTF-8', self._code]
|
Add Java 10 (EAP) executor supportfrom dmoj.executors.java_executor import JavacExecutor
class Executor(JavacExecutor):
compiler = 'javac10'
vm = 'java10'
name = 'JAVA10'
jvm_regex = r'java-10-|openjdk10'
test_program = '''\
import java.io.IOException;
interface IORunnable {
public void run() throws IOException;
}
public class self_test {
public static void run(IORunnable target) throws IOException {
target.run();
}
public static void main(String[] args) throws IOException {
run(() -> {
var buffer = new byte[4096];
int read;
while ((read = System.in.read(buffer)) >= 0)
System.out.write(buffer, 0, read);
});
}
}'''
def get_compile_args(self):
return [self.get_compiler(), '-encoding', 'UTF-8', self._code]
|
<commit_before><commit_msg>Add Java 10 (EAP) executor support<commit_after>from dmoj.executors.java_executor import JavacExecutor
class Executor(JavacExecutor):
compiler = 'javac10'
vm = 'java10'
name = 'JAVA10'
jvm_regex = r'java-10-|openjdk10'
test_program = '''\
import java.io.IOException;
interface IORunnable {
public void run() throws IOException;
}
public class self_test {
public static void run(IORunnable target) throws IOException {
target.run();
}
public static void main(String[] args) throws IOException {
run(() -> {
var buffer = new byte[4096];
int read;
while ((read = System.in.read(buffer)) >= 0)
System.out.write(buffer, 0, read);
});
}
}'''
def get_compile_args(self):
return [self.get_compiler(), '-encoding', 'UTF-8', self._code]
|
|
01cb4195bffaeb0ab264fd8d9ee390492312ef15
|
Problems/spiralMatrix.py
|
Problems/spiralMatrix.py
|
#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
spiral = spiral_order(matrix)
print(spiral)
def spiral_order(matrix):
'''
Given an mxn matrix, returns the elements in spiral order
Input: list of m lists, with n elements per list
Output: list of elements in spiral order
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
returns: [1, 2, 3, 6, 9, 8, 7, 4, 5]
'''
rows = len(matrix)
cols = len(matrix[0])
direction = 0 # 0: right, 1: down, 2: left, 3: up
# Index for each edge of matrix
top = 0
bottom = rows - 1
left = 0
right = cols - 1
spiral = []
while (left <= right and top <= bottom):
if direction == 0:
# Heading left to right
for x in range(left, right+1):
spiral.append(matrix[top][x])
top += 1
elif direction == 1:
# Heading top to bottom
for x in range(top, bottom+1):
spiral.append(matrix[x][right])
right -= 1
elif direction == 2:
# Heading right to left
for x in range(right, left-1, -1):
spiral.append(matrix[bottom][x])
bottom -= 1
elif direction == 3:
# Heading bottom to top
for x in range(bottom, top-1, -1):
spiral.append(matrix[x][left])
left += 1
direction = (direction + 1) % 4
return spiral
if __name__ == '__main__':
main()
|
Add spiral order of matrix problem
|
Add spiral order of matrix problem
|
Python
|
mit
|
HKuz/Test_Code
|
Add spiral order of matrix problem
|
#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
spiral = spiral_order(matrix)
print(spiral)
def spiral_order(matrix):
'''
Given an mxn matrix, returns the elements in spiral order
Input: list of m lists, with n elements per list
Output: list of elements in spiral order
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
returns: [1, 2, 3, 6, 9, 8, 7, 4, 5]
'''
rows = len(matrix)
cols = len(matrix[0])
direction = 0 # 0: right, 1: down, 2: left, 3: up
# Index for each edge of matrix
top = 0
bottom = rows - 1
left = 0
right = cols - 1
spiral = []
while (left <= right and top <= bottom):
if direction == 0:
# Heading left to right
for x in range(left, right+1):
spiral.append(matrix[top][x])
top += 1
elif direction == 1:
# Heading top to bottom
for x in range(top, bottom+1):
spiral.append(matrix[x][right])
right -= 1
elif direction == 2:
# Heading right to left
for x in range(right, left-1, -1):
spiral.append(matrix[bottom][x])
bottom -= 1
elif direction == 3:
# Heading bottom to top
for x in range(bottom, top-1, -1):
spiral.append(matrix[x][left])
left += 1
direction = (direction + 1) % 4
return spiral
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add spiral order of matrix problem<commit_after>
|
#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
spiral = spiral_order(matrix)
print(spiral)
def spiral_order(matrix):
'''
Given an mxn matrix, returns the elements in spiral order
Input: list of m lists, with n elements per list
Output: list of elements in spiral order
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
returns: [1, 2, 3, 6, 9, 8, 7, 4, 5]
'''
rows = len(matrix)
cols = len(matrix[0])
direction = 0 # 0: right, 1: down, 2: left, 3: up
# Index for each edge of matrix
top = 0
bottom = rows - 1
left = 0
right = cols - 1
spiral = []
while (left <= right and top <= bottom):
if direction == 0:
# Heading left to right
for x in range(left, right+1):
spiral.append(matrix[top][x])
top += 1
elif direction == 1:
# Heading top to bottom
for x in range(top, bottom+1):
spiral.append(matrix[x][right])
right -= 1
elif direction == 2:
# Heading right to left
for x in range(right, left-1, -1):
spiral.append(matrix[bottom][x])
bottom -= 1
elif direction == 3:
# Heading bottom to top
for x in range(bottom, top-1, -1):
spiral.append(matrix[x][left])
left += 1
direction = (direction + 1) % 4
return spiral
if __name__ == '__main__':
main()
|
Add spiral order of matrix problem#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
spiral = spiral_order(matrix)
print(spiral)
def spiral_order(matrix):
'''
Given an mxn matrix, returns the elements in spiral order
Input: list of m lists, with n elements per list
Output: list of elements in spiral order
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
returns: [1, 2, 3, 6, 9, 8, 7, 4, 5]
'''
rows = len(matrix)
cols = len(matrix[0])
direction = 0 # 0: right, 1: down, 2: left, 3: up
# Index for each edge of matrix
top = 0
bottom = rows - 1
left = 0
right = cols - 1
spiral = []
while (left <= right and top <= bottom):
if direction == 0:
# Heading left to right
for x in range(left, right+1):
spiral.append(matrix[top][x])
top += 1
elif direction == 1:
# Heading top to bottom
for x in range(top, bottom+1):
spiral.append(matrix[x][right])
right -= 1
elif direction == 2:
# Heading right to left
for x in range(right, left-1, -1):
spiral.append(matrix[bottom][x])
bottom -= 1
elif direction == 3:
# Heading bottom to top
for x in range(bottom, top-1, -1):
spiral.append(matrix[x][left])
left += 1
direction = (direction + 1) % 4
return spiral
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add spiral order of matrix problem<commit_after>#!/Applications/anaconda/envs/Python3/bin
def main():
# Test suite
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
spiral = spiral_order(matrix)
print(spiral)
def spiral_order(matrix):
'''
Given an mxn matrix, returns the elements in spiral order
Input: list of m lists, with n elements per list
Output: list of elements in spiral order
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
returns: [1, 2, 3, 6, 9, 8, 7, 4, 5]
'''
rows = len(matrix)
cols = len(matrix[0])
direction = 0 # 0: right, 1: down, 2: left, 3: up
# Index for each edge of matrix
top = 0
bottom = rows - 1
left = 0
right = cols - 1
spiral = []
while (left <= right and top <= bottom):
if direction == 0:
# Heading left to right
for x in range(left, right+1):
spiral.append(matrix[top][x])
top += 1
elif direction == 1:
# Heading top to bottom
for x in range(top, bottom+1):
spiral.append(matrix[x][right])
right -= 1
elif direction == 2:
# Heading right to left
for x in range(right, left-1, -1):
spiral.append(matrix[bottom][x])
bottom -= 1
elif direction == 3:
# Heading bottom to top
for x in range(bottom, top-1, -1):
spiral.append(matrix[x][left])
left += 1
direction = (direction + 1) % 4
return spiral
if __name__ == '__main__':
main()
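Because each pass over the matrix shrinks exactly one boundary before the direction flips, spiral_order also handles rectangular input. A quick extra check (not in the original file), using the spiral_order defined above:

# 2x4 matrix: right along the top row, down the last column, then left along the bottom row
assert spiral_order([[1, 2, 3, 4], [5, 6, 7, 8]]) == [1, 2, 3, 4, 8, 7, 6, 5]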
|
|
da1bb3a29b9c3da41bf6479563118922bec7f9ba
|
tests/test_grid.py
|
tests/test_grid.py
|
from parcels import Grid
import numpy as np
import pytest
@pytest.mark.parametrize('xdim', [100, 200])
@pytest.mark.parametrize('ydim', [100, 200])
def test_grid_from_data(xdim, ydim):
lon = np.linspace(0., 1., xdim, dtype=np.float32)
lat = np.linspace(0., 1., ydim, dtype=np.float32)
depth = np.zeros(1, dtype=np.float32)
time = np.zeros(1, dtype=np.float64)
u, v = np.meshgrid(lon, lat)
grid = Grid.from_data(u, lon, lat, v, lon, lat, depth, time)
u_t = np.transpose(u).reshape((lat.size, lon.size))
v_t = np.transpose(v).reshape((lat.size, lon.size))
assert len(grid.U.data.shape) == 3 # Will be 4 once we use depth
assert len(grid.V.data.shape) == 3
assert np.allclose(grid.U.data[0, :], u_t, rtol=1e-12)
assert np.allclose(grid.V.data[0, :], v_t, rtol=1e-12)
|
Add a baseline test for creating grids from data
|
Grid: Add a baseline test for creating grids from data
|
Python
|
mit
|
OceanPARCELS/parcels,OceanPARCELS/parcels
|
Grid: Add a baseline test for creating grids from data
|
from parcels import Grid
import numpy as np
import pytest
@pytest.mark.parametrize('xdim', [100, 200])
@pytest.mark.parametrize('ydim', [100, 200])
def test_grid_from_data(xdim, ydim):
lon = np.linspace(0., 1., xdim, dtype=np.float32)
lat = np.linspace(0., 1., ydim, dtype=np.float32)
depth = np.zeros(1, dtype=np.float32)
time = np.zeros(1, dtype=np.float64)
u, v = np.meshgrid(lon, lat)
grid = Grid.from_data(u, lon, lat, v, lon, lat, depth, time)
u_t = np.transpose(u).reshape((lat.size, lon.size))
v_t = np.transpose(v).reshape((lat.size, lon.size))
assert len(grid.U.data.shape) == 3 # Will be 4 once we use depth
assert len(grid.V.data.shape) == 3
assert np.allclose(grid.U.data[0, :], u_t, rtol=1e-12)
assert np.allclose(grid.V.data[0, :], v_t, rtol=1e-12)
|
<commit_before><commit_msg>Grid: Add a baseline test for creating grids from data<commit_after>
|
from parcels import Grid
import numpy as np
import pytest
@pytest.mark.parametrize('xdim', [100, 200])
@pytest.mark.parametrize('ydim', [100, 200])
def test_grid_from_data(xdim, ydim):
lon = np.linspace(0., 1., xdim, dtype=np.float32)
lat = np.linspace(0., 1., ydim, dtype=np.float32)
depth = np.zeros(1, dtype=np.float32)
time = np.zeros(1, dtype=np.float64)
u, v = np.meshgrid(lon, lat)
grid = Grid.from_data(u, lon, lat, v, lon, lat, depth, time)
u_t = np.transpose(u).reshape((lat.size, lon.size))
v_t = np.transpose(v).reshape((lat.size, lon.size))
assert len(grid.U.data.shape) == 3 # Will be 4 once we use depth
assert len(grid.V.data.shape) == 3
assert np.allclose(grid.U.data[0, :], u_t, rtol=1e-12)
assert np.allclose(grid.V.data[0, :], v_t, rtol=1e-12)
|
Grid: Add a baseline test for creating grids from datafrom parcels import Grid
import numpy as np
import pytest
@pytest.mark.parametrize('xdim', [100, 200])
@pytest.mark.parametrize('ydim', [100, 200])
def test_grid_from_data(xdim, ydim):
lon = np.linspace(0., 1., xdim, dtype=np.float32)
lat = np.linspace(0., 1., ydim, dtype=np.float32)
depth = np.zeros(1, dtype=np.float32)
time = np.zeros(1, dtype=np.float64)
u, v = np.meshgrid(lon, lat)
grid = Grid.from_data(u, lon, lat, v, lon, lat, depth, time)
u_t = np.transpose(u).reshape((lat.size, lon.size))
v_t = np.transpose(v).reshape((lat.size, lon.size))
assert len(grid.U.data.shape) == 3 # Will be 4 once we use depth
assert len(grid.V.data.shape) == 3
assert np.allclose(grid.U.data[0, :], u_t, rtol=1e-12)
assert np.allclose(grid.V.data[0, :], v_t, rtol=1e-12)
|
<commit_before><commit_msg>Grid: Add a baseline test for creating grids from data<commit_after>from parcels import Grid
import numpy as np
import pytest
@pytest.mark.parametrize('xdim', [100, 200])
@pytest.mark.parametrize('ydim', [100, 200])
def test_grid_from_data(xdim, ydim):
lon = np.linspace(0., 1., xdim, dtype=np.float32)
lat = np.linspace(0., 1., ydim, dtype=np.float32)
depth = np.zeros(1, dtype=np.float32)
time = np.zeros(1, dtype=np.float64)
u, v = np.meshgrid(lon, lat)
grid = Grid.from_data(u, lon, lat, v, lon, lat, depth, time)
u_t = np.transpose(u).reshape((lat.size, lon.size))
v_t = np.transpose(v).reshape((lat.size, lon.size))
assert len(grid.U.data.shape) == 3 # Will be 4 once we use depth
assert len(grid.V.data.shape) == 3
assert np.allclose(grid.U.data[0, :], u_t, rtol=1e-12)
assert np.allclose(grid.V.data[0, :], v_t, rtol=1e-12)
|
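A small NumPy-only sketch of the meshgrid shape convention the test above relies on; it needs no parcels install and the grid sizes are illustrative:

import numpy as np

# np.meshgrid(lon, lat) with the default 'xy' indexing returns arrays
# shaped (lat.size, lon.size), lon varying along the last axis; the
# expected u_t/v_t arrays in the test are built from this layout.
lon = np.linspace(0., 1., 4, dtype=np.float32)
lat = np.linspace(0., 1., 3, dtype=np.float32)
u, v = np.meshgrid(lon, lat)
assert u.shape == (lat.size, lon.size)
u_t = np.transpose(u).reshape((lat.size, lon.size))
print(u.shape, u_t.shape)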
|
a3a377818e5521487cca1b08a4cc6adcdc7deef6
|
soft/python_test/mqttReceiver.py
|
soft/python_test/mqttReceiver.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 10 20:30:02 2015
@author: piotr at nicecircuits.com
"""
# -*- coding: utf-8 -*-
from __future__ import print_function # compatibility with python 2 and 3
__author__ = 'piotr'
import paho.mqtt.client as mqtt
import serial, time, re, logging, numpy
server = "test.mosquitto.org"
port = 1883
global received, ids, lastId, log, ser, client
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("/drOctopus/#")
#client.subscribe("/esp-link/#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
global received, ids, lastId, log
temp="topic: " + msg.topic + " " + " message: " + str(msg.payload)
log.debug(temp)
def kill_mqtt():
client.disconnect()
def tester_init():
global received, ids, lastId, log, ser, client
#enable debug logs
log=logging.getLogger()
log.setLevel(logging.DEBUG)
formatter=logging.Formatter('%(asctime)s - %(message)s')
logfile=logging.FileHandler("log.txt","w")
logfile.setLevel(logging.INFO)
logfile.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
log.addHandler(console)
log.addHandler(logfile)
client = mqtt.Client(protocol=3)
client.on_connect = on_connect
client.on_message = on_message
client.connect_async(server, port, 60)
log.info("Application start")
client.loop_start()
if __name__ =="__main__":
tester_init()
while 1:
pass
|
Add MQTT receiver python script
|
Add MQTT receiver python script
|
Python
|
cc0-1.0
|
NiceCircuits/DrOctopus,NiceCircuits/DrOctopus,NiceCircuits/DrOctopus,NiceCircuits/DrOctopus
|
Add MQTT receiver python script
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 10 20:30:02 2015
@author: piotr at nicecircuits.com
"""
# -*- coding: utf-8 -*-
from __future__ import print_function # compatibility with python 2 and 3
__author__ = 'piotr'
import paho.mqtt.client as mqtt
import serial, time, re, logging, numpy
server = "test.mosquitto.org"
port = 1883
global received, ids, lastId, log, ser, client
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("/drOctopus/#")
#client.subscribe("/esp-link/#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
global received, ids, lastId, log
temp="topic: " + msg.topic + " " + " message: " + str(msg.payload)
log.debug(temp)
def kill_mqtt():
client.disconnect()
def tester_init():
global received, ids, lastId, log, ser, client
#enable debug logs
log=logging.getLogger()
log.setLevel(logging.DEBUG)
formatter=logging.Formatter('%(asctime)s - %(message)s')
logfile=logging.FileHandler("log.txt","w")
logfile.setLevel(logging.INFO)
logfile.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
log.addHandler(console)
log.addHandler(logfile)
client = mqtt.Client(protocol=3)
client.on_connect = on_connect
client.on_message = on_message
client.connect_async(server, port, 60)
log.info("Application start")
client.loop_start()
if __name__ =="__main__":
tester_init()
while 1:
pass
|
<commit_before><commit_msg>Add MQTT receiver python script<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 10 20:30:02 2015
@author: piotr at nicecircuits.com
"""
# -*- coding: utf-8 -*-
from __future__ import print_function # compatibility with python 2 and 3
__author__ = 'piotr'
import paho.mqtt.client as mqtt
import serial, time, re, logging, numpy
server = "test.mosquitto.org"
port = 1883
global received, ids, lastId, log, ser, client
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("/drOctopus/#")
#client.subscribe("/esp-link/#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
global received, ids, lastId, log
temp="topic: " + msg.topic + " " + " message: " + str(msg.payload)
log.debug(temp)
def kill_mqtt():
client.disconnect()
def tester_init():
global received, ids, lastId, log, ser, client
#enable debug logs
log=logging.getLogger()
log.setLevel(logging.DEBUG)
formatter=logging.Formatter('%(asctime)s - %(message)s')
logfile=logging.FileHandler("log.txt","w")
logfile.setLevel(logging.INFO)
logfile.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
log.addHandler(console)
log.addHandler(logfile)
client = mqtt.Client(protocol=3)
client.on_connect = on_connect
client.on_message = on_message
client.connect_async(server, port, 60)
log.info("Application start")
client.loop_start()
if __name__ =="__main__":
tester_init()
while 1:
pass
|
Add MQTT receiver python script# -*- coding: utf-8 -*-
"""
Created on Tue Nov 10 20:30:02 2015
@author: piotr at nicecircuits.com
"""
# -*- coding: utf-8 -*-
from __future__ import print_function # compatibility with python 2 and 3
__author__ = 'piotr'
import paho.mqtt.client as mqtt
import serial, time, re, logging, numpy
server = "test.mosquitto.org"
port = 1883
global received, ids, lastId, log, ser, client
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("/drOctopus/#")
#client.subscribe("/esp-link/#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
global received, ids, lastId, log
temp="topic: " + msg.topic + " " + " message: " + str(msg.payload)
log.debug(temp)
def kill_mqtt():
client.disconnect()
def tester_init():
global received, ids, lastId, log, ser, client
#enable debug logs
log=logging.getLogger()
log.setLevel(logging.DEBUG)
formatter=logging.Formatter('%(asctime)s - %(message)s')
logfile=logging.FileHandler("log.txt","w")
logfile.setLevel(logging.INFO)
logfile.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
log.addHandler(console)
log.addHandler(logfile)
client = mqtt.Client(protocol=3)
client.on_connect = on_connect
client.on_message = on_message
client.connect_async(server, port, 60)
log.info("Application start")
client.loop_start()
if __name__ =="__main__":
tester_init()
while 1:
pass
|
<commit_before><commit_msg>Add MQTT receiver python script<commit_after># -*- coding: utf-8 -*-
"""
Created on Tue Nov 10 20:30:02 2015
@author: piotr at nicecircuits.com
"""
# -*- coding: utf-8 -*-
from __future__ import print_function # compatibility with python 2 and 3
__author__ = 'piotr'
import paho.mqtt.client as mqtt
import serial, time, re, logging, numpy
server = "test.mosquitto.org"
port = 1883
global received, ids, lastId, log, ser, client
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("/drOctopus/#")
#client.subscribe("/esp-link/#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
global received, ids, lastId, log
temp="topic: " + msg.topic + " " + " message: " + str(msg.payload)
log.debug(temp)
def kill_mqtt():
client.disconnect()
def tester_init():
global received, ids, lastId, log, ser, client
#enable debug logs
log=logging.getLogger()
log.setLevel(logging.DEBUG)
formatter=logging.Formatter('%(asctime)s - %(message)s')
logfile=logging.FileHandler("log.txt","w")
logfile.setLevel(logging.INFO)
logfile.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
log.addHandler(console)
log.addHandler(logfile)
client = mqtt.Client(protocol=3)
client.on_connect = on_connect
client.on_message = on_message
client.connect_async(server, port, 60)
log.info("Application start")
client.loop_start()
if __name__ =="__main__":
tester_init()
while 1:
pass
|
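A minimal companion sketch for exercising the receiver above, assuming the same public broker is reachable; the topic suffix and payload are placeholders:

import paho.mqtt.publish as publish

# Publish one test message that the receiver's "/drOctopus/#"
# subscription should pick up and log.
publish.single(
    "/drOctopus/test",
    payload="hello from publisher",
    hostname="test.mosquitto.org",
    port=1883,
)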
|
787be359bb09d770c218d37c1f4f989cabb8cf1f
|
chainerrl/misc/reward_filter.py
|
chainerrl/misc/reward_filter.py
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
class NormalizedRewardFilter(object):
def __init__(self, tau=1e-3, scale=1, eps=1e-1):
self.tau = tau
self.scale = scale
self.average_reward = 0
self.average_reward_squared = 0
self.eps = eps
def __call__(self, reward):
self.average_reward *= 1 - self.tau
self.average_reward += self.tau * reward
self.average_reward_squared *= 1 - self.tau
self.average_reward_squared += self.tau * reward ** 2
var = self.average_reward_squared - self.average_reward ** 2
stdev = min(var, self.eps) ** 0.5
return self.scale * (reward - self.average_reward) / stdev
class AverageRewardFilter(object):
def __init__(self, tau=1e-3):
self.tau = tau
self.average_reward = 0
def __call__(self, reward):
self.average_reward *= 1 - self.tau
self.average_reward += self.tau * reward
return reward - self.average_reward
|
Add reward filters to use average rewards
|
Add reward filters to use average rewards
|
Python
|
mit
|
toslunar/chainerrl,toslunar/chainerrl
|
Add reward filters to use average rewards
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
class NormalizedRewardFilter(object):
def __init__(self, tau=1e-3, scale=1, eps=1e-1):
self.tau = tau
self.scale = scale
self.average_reward = 0
self.average_reward_squared = 0
self.eps = eps
def __call__(self, reward):
self.average_reward *= 1 - self.tau
self.average_reward += self.tau * reward
self.average_reward_squared *= 1 - self.tau
self.average_reward_squared += self.tau * reward ** 2
var = self.average_reward_squared - self.average_reward ** 2
stdev = min(var, self.eps) ** 0.5
return self.scale * (reward - self.average_reward) / stdev
class AverageRewardFilter(object):
def __init__(self, tau=1e-3):
self.tau = tau
self.average_reward = 0
def __call__(self, reward):
self.average_reward *= 1 - self.tau
self.average_reward += self.tau * reward
return reward - self.average_reward
|
<commit_before><commit_msg>Add reward filters to use average rewards<commit_after>
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
class NormalizedRewardFilter(object):
def __init__(self, tau=1e-3, scale=1, eps=1e-1):
self.tau = tau
self.scale = scale
self.average_reward = 0
self.average_reward_squared = 0
self.eps = eps
def __call__(self, reward):
self.average_reward *= 1 - self.tau
self.average_reward += self.tau * reward
self.average_reward_squared *= 1 - self.tau
self.average_reward_squared += self.tau * reward ** 2
var = self.average_reward_squared - self.average_reward ** 2
stdev = min(var, self.eps) ** 0.5
return self.scale * (reward - self.average_reward) / stdev
class AverageRewardFilter(object):
def __init__(self, tau=1e-3):
self.tau = tau
self.average_reward = 0
def __call__(self, reward):
self.average_reward *= 1 - self.tau
self.average_reward += self.tau * reward
return reward - self.average_reward
|
Add reward filters to use average rewardsfrom __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
class NormalizedRewardFilter(object):
def __init__(self, tau=1e-3, scale=1, eps=1e-1):
self.tau = tau
self.scale = scale
self.average_reward = 0
self.average_reward_squared = 0
self.eps = eps
def __call__(self, reward):
self.average_reward *= 1 - self.tau
self.average_reward += self.tau * reward
self.average_reward_squared *= 1 - self.tau
self.average_reward_squared += self.tau * reward ** 2
var = self.average_reward_squared - self.average_reward ** 2
stdev = min(var, self.eps) ** 0.5
return self.scale * (reward - self.average_reward) / stdev
class AverageRewardFilter(object):
def __init__(self, tau=1e-3):
self.tau = tau
self.average_reward = 0
def __call__(self, reward):
self.average_reward *= 1 - self.tau
self.average_reward += self.tau * reward
return reward - self.average_reward
|
<commit_before><commit_msg>Add reward filters to use average rewards<commit_after>from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
class NormalizedRewardFilter(object):
def __init__(self, tau=1e-3, scale=1, eps=1e-1):
self.tau = tau
self.scale = scale
self.average_reward = 0
self.average_reward_squared = 0
self.eps = eps
def __call__(self, reward):
self.average_reward *= 1 - self.tau
self.average_reward += self.tau * reward
self.average_reward_squared *= 1 - self.tau
self.average_reward_squared += self.tau * reward ** 2
var = self.average_reward_squared - self.average_reward ** 2
stdev = min(var, self.eps) ** 0.5
return self.scale * (reward - self.average_reward) / stdev
class AverageRewardFilter(object):
def __init__(self, tau=1e-3):
self.tau = tau
self.average_reward = 0
def __call__(self, reward):
self.average_reward *= 1 - self.tau
self.average_reward += self.tau * reward
return reward - self.average_reward
|
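A short usage sketch, assuming both filter classes above are in scope; the reward stream is synthetic and avoids a zero first reward, which would make NormalizedRewardFilter divide by zero:

import numpy as np

# Run a noisy synthetic reward stream through both filters and print
# the running outputs (illustrative only).
rng = np.random.RandomState(0)
nrf = NormalizedRewardFilter(tau=1e-2)
arf = AverageRewardFilter(tau=1e-2)
for t in range(5):
    r = 1.0 + rng.randn()
    print(t, nrf(r), arf(r))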
|
e099786cfd080cc2616fcd22a62954b71740528b
|
tests/test_main.py
|
tests/test_main.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pretend
import pytest
from fenrir import __main__
@pytest.mark.parametrize(
("argv", "options"),
[
(["example:app"], (["example:app"], {"bind": None})),
(
["example:app", "-b", "0.0.0.0:5000"],
(["example:app"], {"bind": ["0.0.0.0:5000"]}),
),
],
)
def test_main(argv, options, monkeypatch):
server_obj = pretend.stub(spawn=pretend.call_recorder(lambda: None))
server_cls = pretend.call_recorder(lambda *a, **kw: server_obj)
monkeypatch.setattr(__main__, "Server", server_cls)
__main__.main(argv)
assert server_cls.calls == [pretend.call(*options[0], **options[1])]
assert server_obj.spawn.calls == [pretend.call()]
def test_entrypoint(monkeypatch):
exitcode = pretend.stub()
main = pretend.call_recorder(lambda argv: exitcode)
argv = ["fenrir", "-b", "0.0.0.0:8000"]
monkeypatch.setattr(__main__, "main", main)
monkeypatch.setattr(sys, "argv", argv)
assert __main__.entrypoint() is exitcode
assert main.calls == [pretend.call(["-b", "0.0.0.0:8000"])]
|
Test the CLI interface of fenrir
|
Test the CLI interface of fenrir
|
Python
|
apache-2.0
|
dstufft/fenrir,dstufft/fenrir
|
Test the CLI interface of fenrir
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pretend
import pytest
from fenrir import __main__
@pytest.mark.parametrize(
("argv", "options"),
[
(["example:app"], (["example:app"], {"bind": None})),
(
["example:app", "-b", "0.0.0.0:5000"],
(["example:app"], {"bind": ["0.0.0.0:5000"]}),
),
],
)
def test_main(argv, options, monkeypatch):
server_obj = pretend.stub(spawn=pretend.call_recorder(lambda: None))
server_cls = pretend.call_recorder(lambda *a, **kw: server_obj)
monkeypatch.setattr(__main__, "Server", server_cls)
__main__.main(argv)
assert server_cls.calls == [pretend.call(*options[0], **options[1])]
assert server_obj.spawn.calls == [pretend.call()]
def test_entrypoint(monkeypatch):
exitcode = pretend.stub()
main = pretend.call_recorder(lambda argv: exitcode)
argv = ["fenrir", "-b", "0.0.0.0:8000"]
monkeypatch.setattr(__main__, "main", main)
monkeypatch.setattr(sys, "argv", argv)
assert __main__.entrypoint() is exitcode
assert main.calls == [pretend.call(["-b", "0.0.0.0:8000"])]
|
<commit_before><commit_msg>Test the CLI interface of fenrir<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pretend
import pytest
from fenrir import __main__
@pytest.mark.parametrize(
("argv", "options"),
[
(["example:app"], (["example:app"], {"bind": None})),
(
["example:app", "-b", "0.0.0.0:5000"],
(["example:app"], {"bind": ["0.0.0.0:5000"]}),
),
],
)
def test_main(argv, options, monkeypatch):
server_obj = pretend.stub(spawn=pretend.call_recorder(lambda: None))
server_cls = pretend.call_recorder(lambda *a, **kw: server_obj)
monkeypatch.setattr(__main__, "Server", server_cls)
__main__.main(argv)
assert server_cls.calls == [pretend.call(*options[0], **options[1])]
assert server_obj.spawn.calls == [pretend.call()]
def test_entrypoint(monkeypatch):
exitcode = pretend.stub()
main = pretend.call_recorder(lambda argv: exitcode)
argv = ["fenrir", "-b", "0.0.0.0:8000"]
monkeypatch.setattr(__main__, "main", main)
monkeypatch.setattr(sys, "argv", argv)
assert __main__.entrypoint() is exitcode
assert main.calls == [pretend.call(["-b", "0.0.0.0:8000"])]
|
Test the CLI interface of fenrir# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pretend
import pytest
from fenrir import __main__
@pytest.mark.parametrize(
("argv", "options"),
[
(["example:app"], (["example:app"], {"bind": None})),
(
["example:app", "-b", "0.0.0.0:5000"],
(["example:app"], {"bind": ["0.0.0.0:5000"]}),
),
],
)
def test_main(argv, options, monkeypatch):
server_obj = pretend.stub(spawn=pretend.call_recorder(lambda: None))
server_cls = pretend.call_recorder(lambda *a, **kw: server_obj)
monkeypatch.setattr(__main__, "Server", server_cls)
__main__.main(argv)
assert server_cls.calls == [pretend.call(*options[0], **options[1])]
assert server_obj.spawn.calls == [pretend.call()]
def test_entrypoint(monkeypatch):
exitcode = pretend.stub()
main = pretend.call_recorder(lambda argv: exitcode)
argv = ["fenrir", "-b", "0.0.0.0:8000"]
monkeypatch.setattr(__main__, "main", main)
monkeypatch.setattr(sys, "argv", argv)
assert __main__.entrypoint() is exitcode
assert main.calls == [pretend.call(["-b", "0.0.0.0:8000"])]
|
<commit_before><commit_msg>Test the CLI interface of fenrir<commit_after># Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pretend
import pytest
from fenrir import __main__
@pytest.mark.parametrize(
("argv", "options"),
[
(["example:app"], (["example:app"], {"bind": None})),
(
["example:app", "-b", "0.0.0.0:5000"],
(["example:app"], {"bind": ["0.0.0.0:5000"]}),
),
],
)
def test_main(argv, options, monkeypatch):
server_obj = pretend.stub(spawn=pretend.call_recorder(lambda: None))
server_cls = pretend.call_recorder(lambda *a, **kw: server_obj)
monkeypatch.setattr(__main__, "Server", server_cls)
__main__.main(argv)
assert server_cls.calls == [pretend.call(*options[0], **options[1])]
assert server_obj.spawn.calls == [pretend.call()]
def test_entrypoint(monkeypatch):
exitcode = pretend.stub()
main = pretend.call_recorder(lambda argv: exitcode)
argv = ["fenrir", "-b", "0.0.0.0:8000"]
monkeypatch.setattr(__main__, "main", main)
monkeypatch.setattr(sys, "argv", argv)
assert __main__.entrypoint() is exitcode
assert main.calls == [pretend.call(["-b", "0.0.0.0:8000"])]
|
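A tiny self-contained sketch of the pretend idiom these tests lean on; the recorded arguments are illustrative:

import pretend

# call_recorder wraps a callable and records every call made to it,
# which is what the assertions on `.calls` above rely on.
stub = pretend.call_recorder(lambda *a, **kw: "ok")
stub(1, flag=True)
assert stub.calls == [pretend.call(1, flag=True)]
print(stub.calls)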
|
abf6af81b5f97ca6b6bb479adb1abfdf502d2a9b
|
utils/solve-all.py
|
utils/solve-all.py
|
import os
import subprocess
import sys
import time
paths = []
for path, dirs, files in os.walk('puzzles'):
for file in files:
paths.append(os.path.join(path, file))
for path in paths:
for method in ['human', 'hybrid']:
start = time.time()
try:
output = subprocess.check_output(['python3', 'takuzu.py', '--method', method, path], stderr = subprocess.STDOUT, timeout = 60)
except subprocess.TimeoutExpired:
output = False
end = time.time()
print('{file}\t{method}\t{time}'.format(
file = '\t'.join(path.rsplit('.', 1)[0].split('/')[1:]),
method = method,
time = '{:.02f}'.format(end - start) if output else 'false',
))
sys.stdout.flush()
|
Add a wrapper to solve all puzzles in ./puzzles and print out timings (timeout after a minute)
|
Add a wrapper to solve all puzzles in ./puzzles and print out timings (timeout after a minute)
|
Python
|
bsd-3-clause
|
jpverkamp/takuzu
|
Add a wrapper to solve all puzzles in ./puzzles and print out timings (timeout after a minute)
|
import os
import subprocess
import sys
import time
paths = []
for path, dirs, files in os.walk('puzzles'):
for file in files:
paths.append(os.path.join(path, file))
for path in paths:
for method in ['human', 'hybrid']:
start = time.time()
try:
output = subprocess.check_output(['python3', 'takuzu.py', '--method', method, path], stderr = subprocess.STDOUT, timeout = 60)
except subprocess.TimeoutExpired:
output = False
end = time.time()
print('{file}\t{method}\t{time}'.format(
file = '\t'.join(path.rsplit('.', 1)[0].split('/')[1:]),
method = method,
time = '{:.02f}'.format(end - start) if output else 'false',
))
sys.stdout.flush()
|
<commit_before><commit_msg>Add a wrapper to solve all puzzles in ./puzzles and print out timings (timeout after a minute)<commit_after>
|
import os
import subprocess
import sys
import time
paths = []
for path, dirs, files in os.walk('puzzles'):
for file in files:
paths.append(os.path.join(path, file))
for path in paths:
for method in ['human', 'hybrid']:
start = time.time()
try:
output = subprocess.check_output(['python3', 'takuzu.py', '--method', method, path], stderr = subprocess.STDOUT, timeout = 60)
except subprocess.TimeoutExpired:
output = False
end = time.time()
print('{file}\t{method}\t{time}'.format(
file = '\t'.join(path.rsplit('.', 1)[0].split('/')[1:]),
method = method,
time = '{:.02f}'.format(end - start) if output else 'false',
))
sys.stdout.flush()
|
Add a wrapper to solve all puzzles in ./puzzles and print out timings (timeout after a minute)import os
import subprocess
import sys
import time
paths = []
for path, dirs, files in os.walk('puzzles'):
for file in files:
paths.append(os.path.join(path, file))
for path in paths:
for method in ['human', 'hybrid']:
start = time.time()
try:
output = subprocess.check_output(['python3', 'takuzu.py', '--method', method, path], stderr = subprocess.STDOUT, timeout = 60)
except subprocess.TimeoutExpired:
output = False
end = time.time()
print('{file}\t{method}\t{time}'.format(
file = '\t'.join(path.rsplit('.', 1)[0].split('/')[1:]),
method = method,
time = '{:.02f}'.format(end - start) if output else 'false',
))
sys.stdout.flush()
|
<commit_before><commit_msg>Add a wrapper to solve all puzzles in ./puzzles and print out timings (timeout after a minute)<commit_after>import os
import subprocess
import sys
import time
paths = []
for path, dirs, files in os.walk('puzzles'):
for file in files:
paths.append(os.path.join(path, file))
for path in paths:
for method in ['human', 'hybrid']:
start = time.time()
try:
output = subprocess.check_output(['python3', 'takuzu.py', '--method', method, path], stderr = subprocess.STDOUT, timeout = 60)
except subprocess.TimeoutExpired:
output = False
end = time.time()
print('{file}\t{method}\t{time}'.format(
file = '\t'.join(path.rsplit('.', 1)[0].split('/')[1:]),
method = method,
time = '{:.02f}'.format(end - start) if output else 'false',
))
sys.stdout.flush()
|
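An isolated sketch of the timeout pattern used above, with a harmless child command substituted in and assuming python3 is on PATH, as the wrapper itself does; the one-second limit is illustrative:

import subprocess

# check_output raises TimeoutExpired if the child does not finish in
# time; the wrapper above treats that case as an unsolved puzzle.
try:
    out = subprocess.check_output(
        ['python3', '-c', 'print("done")'],
        stderr=subprocess.STDOUT,
        timeout=1,
    )
    print(out)
except subprocess.TimeoutExpired:
    print('timed out')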
|
7fcc89f131753432fe42c0b3c373d3008353ba39
|
tools/novasetup.py
|
tools/novasetup.py
|
# Still some problems...
import time
import shutil
from configobj import ConfigObj
NOVA_API_CONF = "/etc/nova/api-paste.ini"
OS_API_SEC = "composite:openstack_compute_api_v2"
DR_FILTER_TARGET_KEY = "keystone_nolimit"
DR_FILTER_TARGET_KEY_VALUE = "compute_req_id faultwrap sizelimit " \
"authtoken keystonecontext drfilter " \
"osapi_compute_app_v2"
DR_SEC = "filter:drfilter"
DR_KEY = "paste.filter_factory"
DR_KEY_VALUE = "drfilter.urlforwarding:url_forwarding_factory"
# Backup /etc/nova/api-paste.ini
now = time.strftime('%Y%m%d%H%M%S')
target = NOVA_API_CONF + "." + now + ".bak"
shutil.copyfile(NOVA_API_CONF, target)
# Update /etc/nova/api-paste.ini
conf = ConfigObj(NOVA_API_CONF)
conf[OS_API_SEC][DR_FILTER_TARGET_KEY] = DR_FILTER_TARGET_KEY_VALUE
conf[DR_SEC] = {}
conf[DR_SEC][DR_KEY] = DR_KEY_VALUE
conf.write()
for sec in conf:
print(sec)
for key in conf[sec]:
print("\t" + key + " = " + conf[sec][key])
|
Add a tool to configure nova paste, not finished
|
Add a tool to configure nova paste, not finished
|
Python
|
apache-2.0
|
fs714/drfilter
|
Add a tool to configure nova paste, not finished
|
# Still some problems...
import time
import shutil
from configobj import ConfigObj
NOVA_API_CONF = "/etc/nova/api-paste.ini"
OS_API_SEC = "composite:openstack_compute_api_v2"
DR_FILTER_TARGET_KEY = "keystone_nolimit"
DR_FILTER_TARGET_KEY_VALUE = "compute_req_id faultwrap sizelimit " \
"authtoken keystonecontext drfilter " \
"osapi_compute_app_v2"
DR_SEC = "filter:drfilter"
DR_KEY = "paste.filter_factory"
DR_KEY_VALUE = "drfilter.urlforwarding:url_forwarding_factory"
# Backup /etc/nova/api-paste.ini
now = time.strftime('%Y%m%d%H%M%S')
target = NOVA_API_CONF + "." + now + ".bak"
shutil.copyfile(NOVA_API_CONF, target)
# Update /etc/nova/api-paste.ini
conf = ConfigObj(NOVA_API_CONF)
conf[OS_API_SEC][DR_FILTER_TARGET_KEY] = DR_FILTER_TARGET_KEY_VALUE
conf[DR_SEC] = {}
conf[DR_SEC][DR_KEY] = DR_KEY_VALUE
conf.write()
for sec in conf:
print(sec)
for key in conf[sec]:
print("\t" + key + " = " + conf[sec][key])
|
<commit_before><commit_msg>Add a tool to configure nova paste, not finished<commit_after>
|
# Still some problems...
import time
import shutil
from configobj import ConfigObj
NOVA_API_CONF = "/etc/nova/api-paste.ini"
OS_API_SEC = "composite:openstack_compute_api_v2"
DR_FILTER_TARGET_KEY = "keystone_nolimit"
DR_FILTER_TARGET_KEY_VALUE = "compute_req_id faultwrap sizelimit " \
"authtoken keystonecontext drfilter " \
"osapi_compute_app_v2"
DR_SEC = "filter:drfilter"
DR_KEY = "paste.filter_factory"
DR_KEY_VALUE = "drfilter.urlforwarding:url_forwarding_factory"
# Backup /etc/nova/api-paste.ini
now = time.strftime('%Y%m%d%H%M%S')
target = NOVA_API_CONF + "." + now + ".bak"
shutil.copyfile(NOVA_API_CONF, target)
# Update /etc/nova/api-paste.ini
conf = ConfigObj(NOVA_API_CONF)
conf[OS_API_SEC][DR_FILTER_TARGET_KEY] = DR_FILTER_TARGET_KEY_VALUE
conf[DR_SEC] = {}
conf[DR_SEC][DR_KEY] = DR_KEY_VALUE
conf.write()
for sec in conf:
print(sec)
for key in conf[sec]:
print("\t" + key + " = " + conf[sec][key])
|
Add a tool to configure nova paste, not finished# Still some problems...
import time
import shutil
from configobj import ConfigObj
NOVA_API_CONF = "/etc/nova/api-paste.ini"
OS_API_SEC = "composite:openstack_compute_api_v2"
DR_FILTER_TARGET_KEY = "keystone_nolimit"
DR_FILTER_TARGET_KEY_VALUE = "compute_req_id faultwrap sizelimit " \
"authtoken keystonecontext drfilter " \
"osapi_compute_app_v2"
DR_SEC = "filter:drfilter"
DR_KEY = "paste.filter_factory"
DR_KEY_VALUE = "drfilter.urlforwarding:url_forwarding_factory"
# Backup /etc/nova/api-paste.ini
now = time.strftime('%Y%m%d%H%M%S')
target = NOVA_API_CONF + "." + now + ".bak"
shutil.copyfile(NOVA_API_CONF, target)
# Update /etc/nova/api-paste.ini
conf = ConfigObj(NOVA_API_CONF)
conf[OS_API_SEC][DR_FILTER_TARGET_KEY] = DR_FILTER_TARGET_KEY_VALUE
conf[DR_SEC] = {}
conf[DR_SEC][DR_KEY] = DR_KEY_VALUE
conf.write()
for sec in conf:
print(sec)
for key in conf[sec]:
print("\t" + key + " = " + conf[sec][key])
|
<commit_before><commit_msg>Add a tool to configure nova paste, not finished<commit_after># Still some problems...
import time
import shutil
from configobj import ConfigObj
NOVA_API_CONF = "/etc/nova/api-paste.ini"
OS_API_SEC = "composite:openstack_compute_api_v2"
DR_FILTER_TARGET_KEY = "keystone_nolimit"
DR_FILTER_TARGET_KEY_VALUE = "compute_req_id faultwrap sizelimit " \
"authtoken keystonecontext drfilter " \
"osapi_compute_app_v2"
DR_SEC = "filter:drfilter"
DR_KEY = "paste.filter_factory"
DR_KEY_VALUE = "drfilter.urlforwarding:url_forwarding_factory"
# Backup /etc/nova/api-paste.ini
now = time.strftime('%Y%m%d%H%M%S')
target = NOVA_API_CONF + "." + now + ".bak"
shutil.copyfile(NOVA_API_CONF, target)
# Update /etc/nova/api-paste.ini
conf = ConfigObj(NOVA_API_CONF)
conf[OS_API_SEC][DR_FILTER_TARGET_KEY] = DR_FILTER_TARGET_KEY_VALUE
conf[DR_SEC] = {}
conf[DR_SEC][DR_KEY] = DR_KEY_VALUE
conf.write()
for sec in conf:
print(sec)
for key in conf[sec]:
print("\t" + key + " = " + conf[sec][key])
|
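A self-contained ConfigObj round-trip showing the same edit pattern against a throwaway file, so the real api-paste.ini is untouched; the section and key names are placeholders:

from configobj import ConfigObj

# Create, write and re-read a tiny ini file to illustrate the
# section/key assignment used above.
conf = ConfigObj('demo-paste.ini')
conf['composite:demo'] = {}
conf['composite:demo']['pipeline'] = 'authtoken drfilter app'
conf.write()
for sec in ConfigObj('demo-paste.ini'):
    print(sec)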
|
edf2c0d777672568b2223fdbd6858f9c9a34ee44
|
DataWrangling/scraping_web2.py
|
DataWrangling/scraping_web2.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to get information from:
http://www.transtats.bts.gov/Data_Elements.aspx?Data=2
about carrier and airports
"""
from bs4 import BeautifulSoup
import requests
import urllib2
def extract_data(url, s):
# Extract data from a html source from a URL
r = s.get(url)
soup = BeautifulSoup(r.text)
data = {"eventvalidation": "",
"viewstate": ""}
eventvalidation_element = soup.find(id="__EVENTVALIDATION")
data["eventvalidation"] = eventvalidation_element["value"]
viewstate_element = soup.find(id="__VIEWSTATE")
data["viewstate"] = viewstate_element["value"]
return data
def make_request(data, s):
# Make request to get data
eventvalidation = data["eventvalidation"]
viewstate = data["viewstate"]
r = s.post("http://www.transtats.bts.gov/Data_Elements.aspx?Data=2",
data={'AirportList': "BOS",
'CarrierList': "VX",
'Submit': 'Submit',
"__EVENTTARGET": "",
"__EVENTARGUMENT": "",
"__EVENTVALIDATION": eventvalidation,
"__VIEWSTATE": viewstate
})
return r.text
def make_file(html):
# Make file with the result data
f = open("text.html", "w")
f.write(html)
def options(soup, id):
# Get data about options: airport and carriers
option_values = []
carrier_list = soup.find(id=id)
for option in carrier_list.find_all('option'):
option_values.append(option['value'])
return option_values
def print_list(label, codes):
# Print data
print "\n%s:" % label
for c in codes:
print c
def get_web(url):
# Get url
page = urllib2.urlopen(url)
page_source = page.read()
return page_source
def main():
# setup the location files
URL = 'http://www.transtats.bts.gov/Data_Elements.aspx?Data=2'
page_source = get_web(URL)
soup = BeautifulSoup(page_source)
codes = options(soup, "CarrierList")
print_list("Carriers", codes)
codes = options(soup, "AirportList")
print_list("Airports", codes)
s = requests.Session()
data = extract_data(URL, s)
html = make_request(data, s)
make_file(html)
if __name__ == '__main__':
main()
|
Create a file to get information about airports and carriers from a particular URL.
|
feat: Create a file to get information about airports and carriers from a particular URL.
|
Python
|
mit
|
aguijarro/DataSciencePython
|
feat: Create a file to get information about airports and carriers from a particular URL.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to get information from:
http://www.transtats.bts.gov/Data_Elements.aspx?Data=2
about carrier and airports
"""
from bs4 import BeautifulSoup
import requests
import urllib2
def extract_data(url, s):
# Extract data from a html source from a URL
r = s.get(url)
soup = BeautifulSoup(r.text)
data = {"eventvalidation": "",
"viewstate": ""}
eventvalidation_element = soup.find(id="__EVENTVALIDATION")
data["eventvalidation"] = eventvalidation_element["value"]
viewstate_element = soup.find(id="__VIEWSTATE")
data["viewstate"] = viewstate_element["value"]
return data
def make_request(data, s):
# Make request to get data
eventvalidation = data["eventvalidation"]
viewstate = data["viewstate"]
r = s.post("http://www.transtats.bts.gov/Data_Elements.aspx?Data=2",
data={'AirportList': "BOS",
'CarrierList': "VX",
'Submit': 'Submit',
"__EVENTTARGET": "",
"__EVENTARGUMENT": "",
"__EVENTVALIDATION": eventvalidation,
"__VIEWSTATE": viewstate
})
return r.text
def make_file(html):
# Make file with the result data
f = open("text.html", "w")
f.write(html)
def options(soup, id):
# Get data about options: airport and carriers
option_values = []
carrier_list = soup.find(id=id)
for option in carrier_list.find_all('option'):
option_values.append(option['value'])
return option_values
def print_list(label, codes):
# Print data
print "\n%s:" % label
for c in codes:
print c
def get_web(url):
# Get url
page = urllib2.urlopen(url)
page_source = page.read()
return page_source
def main():
# setup the location files
URL = 'http://www.transtats.bts.gov/Data_Elements.aspx?Data=2'
page_source = get_web(URL)
soup = BeautifulSoup(page_source)
codes = options(soup, "CarrierList")
print_list("Carriers", codes)
codes = options(soup, "AirportList")
print_list("Airports", codes)
s = requests.Session()
data = extract_data(URL, s)
html = make_request(data, s)
make_file(html)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>feat: Create a file to get information about airports and carriers from a particular URL.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to get information from:
http://www.transtats.bts.gov/Data_Elements.aspx?Data=2
about carrier and airports
"""
from bs4 import BeautifulSoup
import requests
import urllib2
def extract_data(url, s):
# Extract data from a html source from a URL
r = s.get(url)
soup = BeautifulSoup(r.text)
data = {"eventvalidation": "",
"viewstate": ""}
eventvalidation_element = soup.find(id="__EVENTVALIDATION")
data["eventvalidation"] = eventvalidation_element["value"]
viewstate_element = soup.find(id="__VIEWSTATE")
data["viewstate"] = viewstate_element["value"]
return data
def make_request(data, s):
# Make request to get data
eventvalidation = data["eventvalidation"]
viewstate = data["viewstate"]
r = s.post("http://www.transtats.bts.gov/Data_Elements.aspx?Data=2",
data={'AirportList': "BOS",
'CarrierList': "VX",
'Submit': 'Submit',
"__EVENTTARGET": "",
"__EVENTARGUMENT": "",
"__EVENTVALIDATION": eventvalidation,
"__VIEWSTATE": viewstate
})
return r.text
def make_file(html):
# Make file with the result data
f = open("text.html", "w")
f.write(html)
def options(soup, id):
# Get data about options: airport and carriers
option_values = []
carrier_list = soup.find(id=id)
for option in carrier_list.find_all('option'):
option_values.append(option['value'])
return option_values
def print_list(label, codes):
# Print data
print "\n%s:" % label
for c in codes:
print c
def get_web(url):
# Get url
page = urllib2.urlopen(url)
page_source = page.read()
return page_source
def main():
# setup the location files
URL = 'http://www.transtats.bts.gov/Data_Elements.aspx?Data=2'
page_source = get_web(URL)
soup = BeautifulSoup(page_source)
codes = options(soup, "CarrierList")
print_list("Carriers", codes)
codes = options(soup, "AirportList")
print_list("Airports", codes)
s = requests.Session()
data = extract_data(URL, s)
html = make_request(data, s)
make_file(html)
if __name__ == '__main__':
main()
|
feat: Create a file to get information about airports and carriers from a particular URL.#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to get information from:
http://www.transtats.bts.gov/Data_Elements.aspx?Data=2
about carrier and airports
"""
from bs4 import BeautifulSoup
import requests
import urllib2
def extract_data(url, s):
# Extract data from a html source from a URL
r = s.get(url)
soup = BeautifulSoup(r.text)
data = {"eventvalidation": "",
"viewstate": ""}
eventvalidation_element = soup.find(id="__EVENTVALIDATION")
data["eventvalidation"] = eventvalidation_element["value"]
viewstate_element = soup.find(id="__VIEWSTATE")
data["viewstate"] = viewstate_element["value"]
return data
def make_request(data, s):
# Make request to get data
eventvalidation = data["eventvalidation"]
viewstate = data["viewstate"]
r = s.post("http://www.transtats.bts.gov/Data_Elements.aspx?Data=2",
data={'AirportList': "BOS",
'CarrierList': "VX",
'Submit': 'Submit',
"__EVENTTARGET": "",
"__EVENTARGUMENT": "",
"__EVENTVALIDATION": eventvalidation,
"__VIEWSTATE": viewstate
})
return r.text
def make_file(html):
# Make file with the result data
f = open("text.html", "w")
f.write(html)
def options(soup, id):
# Get data about options: airport and carriers
option_values = []
carrier_list = soup.find(id=id)
for option in carrier_list.find_all('option'):
option_values.append(option['value'])
return option_values
def print_list(label, codes):
# Print data
print "\n%s:" % label
for c in codes:
print c
def get_web(url):
# Get url
page = urllib2.urlopen(url)
page_source = page.read()
return page_source
def main():
# setup the location files
URL = 'http://www.transtats.bts.gov/Data_Elements.aspx?Data=2'
page_source = get_web(URL)
soup = BeautifulSoup(page_source)
codes = options(soup, "CarrierList")
print_list("Carriers", codes)
codes = options(soup, "AirportList")
print_list("Airports", codes)
s = requests.Session()
data = extract_data(URL, s)
html = make_request(data, s)
make_file(html)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>feat: Create a file to get information about airports and carriers from a particular URL.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to get information from:
http://www.transtats.bts.gov/Data_Elements.aspx?Data=2
about carrier and airports
"""
from bs4 import BeautifulSoup
import requests
import urllib2
def extract_data(url, s):
# Extract data from a html source from a URL
r = s.get(url)
soup = BeautifulSoup(r.text)
data = {"eventvalidation": "",
"viewstate": ""}
eventvalidation_element = soup.find(id="__EVENTVALIDATION")
data["eventvalidation"] = eventvalidation_element["value"]
viewstate_element = soup.find(id="__VIEWSTATE")
data["viewstate"] = viewstate_element["value"]
return data
def make_request(data, s):
# Make request to get data
eventvalidation = data["eventvalidation"]
viewstate = data["viewstate"]
r = s.post("http://www.transtats.bts.gov/Data_Elements.aspx?Data=2",
data={'AirportList': "BOS",
'CarrierList': "VX",
'Submit': 'Submit',
"__EVENTTARGET": "",
"__EVENTARGUMENT": "",
"__EVENTVALIDATION": eventvalidation,
"__VIEWSTATE": viewstate
})
return r.text
def make_file(html):
# Make file with the result data
f = open("text.html", "w")
f.write(html)
def options(soup, id):
# Get data about options: airport and carriers
option_values = []
carrier_list = soup.find(id=id)
for option in carrier_list.find_all('option'):
option_values.append(option['value'])
return option_values
def print_list(label, codes):
# Print data
print "\n%s:" % label
for c in codes:
print c
def get_web(url):
# Get url
page = urllib2.urlopen(url)
page_source = page.read()
return page_source
def main():
# setup the location files
URL = 'http://www.transtats.bts.gov/Data_Elements.aspx?Data=2'
page_source = get_web(URL)
soup = BeautifulSoup(page_source)
codes = options(soup, "CarrierList")
print_list("Carriers", codes)
codes = options(soup, "AirportList")
print_list("Airports", codes)
s = requests.Session()
data = extract_data(URL, s)
html = make_request(data, s)
make_file(html)
if __name__ == '__main__':
main()
|
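A self-contained sketch of the hidden-field extraction step, run against an inline HTML snippet instead of the live site; the field values are made up:

from bs4 import BeautifulSoup

# The ASP.NET form round-trip above only needs these two hidden inputs.
html = ('<form>'
        '<input id="__EVENTVALIDATION" value="ev-123"/>'
        '<input id="__VIEWSTATE" value="vs-456"/>'
        '</form>')
soup = BeautifulSoup(html, "html.parser")
data = {"eventvalidation": soup.find(id="__EVENTVALIDATION")["value"],
        "viewstate": soup.find(id="__VIEWSTATE")["value"]}
print(data)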
|
a239a0b213009c61a7f21f673f8698c3227048e2
|
tools/write_frames_linux_64.py
|
tools/write_frames_linux_64.py
|
#!/usr/bin/env python
import os
import sys
import json
import r2pipe
from getopt import getopt, GetoptError
r2 = None
def write_frame(frame_no, lvl_prefix):
lvl_path = os.path.join(lvl_prefix, "%d.lvl" % frame_no)
print "writing frame %d from %s..." % (frame_no, lvl_path)
lvl_frame = json.load(fp = open(lvl_path, "r"))
for obj in lvl_frame['objects']:
if obj["error"] != 0:
continue
r2.cmd("s %d" % obj["addr_pos_x"])
r2.cmd("s+ 1")
r2.cmd("wv4 %d" % obj["pos_x"])
r2.cmd("s %d" % obj["addr_pos_y"])
r2.cmd("s+ 1")
r2.cmd("wv4 %d" % obj["pos_y"])
if __name__ == "__main__":
engine_path = "Chowdren"
out_dir = "."
try:
opt_val, params = getopt(sys.argv[1:], "i:o:", ["in=", "out="])
for option, value in opt_val:
if option == "-i" or option == "--in":
engine_path = value
elif option == "-o" or option == "--out":
out_dir = value
except GetoptError:
print usage_string
exit(1)
r2 = r2pipe.open(engine_path)
r2.cmd("oo+")
for frame_no in range(1, 88):
lvl_path = os.path.join(out_dir, "%d.lvl" % frame_no)
write_frame(frame_no, out_dir)
|
Add a script to write frame changes to the executable.
|
Add a script to write frame changes to the executable.
|
Python
|
bsd-2-clause
|
snickerbockers/freedom_editor
|
Add a script to write frame changes to the executable.
|
#!/usr/bin/env python
import os
import sys
import json
import r2pipe
from getopt import getopt, GetoptError
r2 = None
def write_frame(frame_no, lvl_prefix):
lvl_path = os.path.join(lvl_prefix, "%d.lvl" % frame_no)
print "writing frame %d from %s..." % (frame_no, lvl_path)
lvl_frame = json.load(fp = open(lvl_path, "r"))
for obj in lvl_frame['objects']:
if obj["error"] != 0:
continue
r2.cmd("s %d" % obj["addr_pos_x"])
r2.cmd("s+ 1")
r2.cmd("wv4 %d" % obj["pos_x"])
r2.cmd("s %d" % obj["addr_pos_y"])
r2.cmd("s+ 1")
r2.cmd("wv4 %d" % obj["pos_y"])
if __name__ == "__main__":
engine_path = "Chowdren"
out_dir = "."
try:
opt_val, params = getopt(sys.argv[1:], "i:o:", ["in=", "out="])
for option, value in opt_val:
if option == "-i" or option == "--in":
engine_path = value
elif option == "-o" or option == "--out":
out_dir = value
except GetoptError:
print usage_string
exit(1)
r2 = r2pipe.open(engine_path)
r2.cmd("oo+")
for frame_no in range(1, 88):
lvl_path = os.path.join(out_dir, "%d.lvl" % frame_no)
write_frame(frame_no, out_dir)
|
<commit_before><commit_msg>Add a script to write frame changes to the executable.<commit_after>
|
#!/usr/bin/env python
import os
import sys
import json
import r2pipe
from getopt import getopt, GetoptError
r2 = None
def write_frame(frame_no, lvl_prefix):
lvl_path = os.path.join(lvl_prefix, "%d.lvl" % frame_no)
print "writing frame %d from %s..." % (frame_no, lvl_path)
lvl_frame = json.load(fp = open(lvl_path, "r"))
for obj in lvl_frame['objects']:
if obj["error"] != 0:
continue
r2.cmd("s %d" % obj["addr_pos_x"])
r2.cmd("s+ 1")
r2.cmd("wv4 %d" % obj["pos_x"])
r2.cmd("s %d" % obj["addr_pos_y"])
r2.cmd("s+ 1")
r2.cmd("wv4 %d" % obj["pos_y"])
if __name__ == "__main__":
engine_path = "Chowdren"
out_dir = "."
try:
opt_val, params = getopt(sys.argv[1:], "i:o:", ["in=", "out="])
for option, value in opt_val:
if option == "-i" or option == "--in":
engine_path = value
elif option == "-o" or option == "--out":
out_dir = value
except GetoptError:
print usage_string
exit(1)
r2 = r2pipe.open(engine_path)
r2.cmd("oo+")
for frame_no in range(1, 88):
lvl_path = os.path.join(out_dir, "%d.lvl" % frame_no)
write_frame(frame_no, out_dir)
|
Add a script to write frame changes to the executable.#!/usr/bin/env python
import os
import sys
import json
import r2pipe
from getopt import getopt, GetoptError
r2 = None
def write_frame(frame_no, lvl_prefix):
lvl_path = os.path.join(lvl_prefix, "%d.lvl" % frame_no)
print "writing frame %d from %s..." % (frame_no, lvl_path)
lvl_frame = json.load(fp = open(lvl_path, "r"))
for obj in lvl_frame['objects']:
if obj["error"] != 0:
continue
r2.cmd("s %d" % obj["addr_pos_x"])
r2.cmd("s+ 1")
r2.cmd("wv4 %d" % obj["pos_x"])
r2.cmd("s %d" % obj["addr_pos_y"])
r2.cmd("s+ 1")
r2.cmd("wv4 %d" % obj["pos_y"])
if __name__ == "__main__":
engine_path = "Chowdren"
out_dir = "."
try:
opt_val, params = getopt(sys.argv[1:], "i:o:", ["in=", "out="])
for option, value in opt_val:
if option == "-i" or option == "--in":
engine_path = value
elif option == "-o" or option == "--out":
out_dir = value
except GetoptError:
print usage_string
exit(1)
r2 = r2pipe.open(engine_path)
r2.cmd("oo+")
for frame_no in range(1, 88):
lvl_path = os.path.join(out_dir, "%d.lvl" % frame_no)
write_frame(frame_no, out_dir)
|
<commit_before><commit_msg>Add a script to write frame changes to the executable.<commit_after>#!/usr/bin/env python
import os
import sys
import json
import r2pipe
from getopt import getopt, GetoptError
r2 = None
def write_frame(frame_no, lvl_prefix):
lvl_path = os.path.join(lvl_prefix, "%d.lvl" % frame_no)
print "writing frame %d from %s..." % (frame_no, lvl_path)
lvl_frame = json.load(fp = open(lvl_path, "r"))
for obj in lvl_frame['objects']:
if obj["error"] != 0:
continue
r2.cmd("s %d" % obj["addr_pos_x"])
r2.cmd("s+ 1")
r2.cmd("wv4 %d" % obj["pos_x"])
r2.cmd("s %d" % obj["addr_pos_y"])
r2.cmd("s+ 1")
r2.cmd("wv4 %d" % obj["pos_y"])
if __name__ == "__main__":
engine_path = "Chowdren"
out_dir = "."
try:
opt_val, params = getopt(sys.argv[1:], "i:o:", ["in=", "out="])
for option, value in opt_val:
if option == "-i" or option == "--in":
engine_path = value
elif option == "-o" or option == "--out":
out_dir = value
except GetoptError:
print usage_string
exit(1)
r2 = r2pipe.open(engine_path)
r2.cmd("oo+")
for frame_no in range(1, 88):
lvl_path = os.path.join(out_dir, "%d.lvl" % frame_no)
write_frame(frame_no, out_dir)
|
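A read-only companion sketch, assuming radare2 with r2pipe is installed and the Chowdren binary sits in the working directory, as the script expects; the offset is illustrative and nothing is written back:

import r2pipe

# Open the target, seek as write_frame does, and hex-dump one
# 32-bit word instead of patching it.
r2 = r2pipe.open("Chowdren")
r2.cmd("s 0x1000")
print(r2.cmd("pxw 4"))
r2.quit()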
|
e4e13c5be054707ea08cf18da36f5b01f745c818
|
mezzanine/twitter/__init__.py
|
mezzanine/twitter/__init__.py
|
"""
Provides models and utilities for displaying different types of Twitter feeds.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine import __version__
# Constants/choices for the different query types.
QUERY_TYPE_USER = "user"
QUERY_TYPE_LIST = "list"
QUERY_TYPE_SEARCH = "search"
QUERY_TYPE_CHOICES = (
(QUERY_TYPE_USER, _("User")),
(QUERY_TYPE_LIST, _("List")),
(QUERY_TYPE_SEARCH, _("Search")),
)
def get_auth_settings():
"""
Returns all the key/secret settings for Twitter access,
only if they're all defined.
"""
from mezzanine.conf import settings
settings.use_editable()
auth_settings = (settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN_KEY,
settings.TWITTER_ACCESS_TOKEN_SECRET)
return auth_settings if all(auth_settings) else None
|
"""
Provides models and utilities for displaying different types of Twitter feeds.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine import __version__
# Constants/choices for the different query types.
QUERY_TYPE_USER = "user"
QUERY_TYPE_LIST = "list"
QUERY_TYPE_SEARCH = "search"
QUERY_TYPE_CHOICES = (
(QUERY_TYPE_USER, _("User")),
(QUERY_TYPE_LIST, _("List")),
(QUERY_TYPE_SEARCH, _("Search")),
)
def get_auth_settings():
"""
Returns all the key/secret settings for Twitter access,
only if they're all defined.
"""
from mezzanine.conf import settings
settings.use_editable()
try:
auth_settings = (settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN_KEY,
settings.TWITTER_ACCESS_TOKEN_SECRET)
except AttributeError:
return None
else:
return auth_settings if all(auth_settings) else None
|
Fix error raised when twitter lib is installed, but mezzanine.twitter is removed from INSTALLED_APPS.
|
Fix error raised when twitter lib is installed, but mezzanine.twitter is removed from INSTALLED_APPS.
|
Python
|
bsd-2-clause
|
damnfine/mezzanine,frankchin/mezzanine,mush42/mezzanine,promil23/mezzanine,ZeroXn/mezzanine,Kniyl/mezzanine,viaregio/mezzanine,damnfine/mezzanine,douglaskastle/mezzanine,nikolas/mezzanine,ZeroXn/mezzanine,readevalprint/mezzanine,promil23/mezzanine,Skytorn86/mezzanine,webounty/mezzanine,frankier/mezzanine,frankier/mezzanine,ryneeverett/mezzanine,nikolas/mezzanine,christianwgd/mezzanine,mush42/mezzanine,christianwgd/mezzanine,webounty/mezzanine,biomassives/mezzanine,dustinrb/mezzanine,gradel/mezzanine,theclanks/mezzanine,molokov/mezzanine,sjdines/mezzanine,readevalprint/mezzanine,spookylukey/mezzanine,gradel/mezzanine,joshcartme/mezzanine,emile2016/mezzanine,sjdines/mezzanine,Kniyl/mezzanine,Skytorn86/mezzanine,biomassives/mezzanine,industrydive/mezzanine,gradel/mezzanine,emile2016/mezzanine,adrian-the-git/mezzanine,Skytorn86/mezzanine,geodesign/mezzanine,eino-makitalo/mezzanine,SoLoHiC/mezzanine,joshcartme/mezzanine,dustinrb/mezzanine,PegasusWang/mezzanine,tuxinhang1989/mezzanine,dsanders11/mezzanine,industrydive/mezzanine,wyzex/mezzanine,dovydas/mezzanine,saintbird/mezzanine,ryneeverett/mezzanine,dsanders11/mezzanine,molokov/mezzanine,industrydive/mezzanine,saintbird/mezzanine,christianwgd/mezzanine,tuxinhang1989/mezzanine,sjdines/mezzanine,saintbird/mezzanine,molokov/mezzanine,sjuxax/mezzanine,viaregio/mezzanine,wyzex/mezzanine,PegasusWang/mezzanine,geodesign/mezzanine,stephenmcd/mezzanine,jjz/mezzanine,Cicero-Zhao/mezzanine,eino-makitalo/mezzanine,jerivas/mezzanine,douglaskastle/mezzanine,dovydas/mezzanine,ryneeverett/mezzanine,PegasusWang/mezzanine,vladir/mezzanine,wyzex/mezzanine,adrian-the-git/mezzanine,viaregio/mezzanine,ZeroXn/mezzanine,SoLoHiC/mezzanine,spookylukey/mezzanine,wbtuomela/mezzanine,jerivas/mezzanine,joshcartme/mezzanine,Cajoline/mezzanine,vladir/mezzanine,dsanders11/mezzanine,vladir/mezzanine,adrian-the-git/mezzanine,nikolas/mezzanine,emile2016/mezzanine,promil23/mezzanine,frankchin/mezzanine,tuxinhang1989/mezzanine,wbtuomela/mezzanine,jjz/mezzanine,sjuxax/mezzanine,wbtuomela/mezzanine,Cicero-Zhao/mezzanine,frankchin/mezzanine,readevalprint/mezzanine,sjuxax/mezzanine,Kniyl/mezzanine,spookylukey/mezzanine,mush42/mezzanine,stephenmcd/mezzanine,damnfine/mezzanine,dustinrb/mezzanine,Cajoline/mezzanine,jerivas/mezzanine,Cajoline/mezzanine,theclanks/mezzanine,douglaskastle/mezzanine,stephenmcd/mezzanine,frankier/mezzanine,eino-makitalo/mezzanine,geodesign/mezzanine,SoLoHiC/mezzanine,jjz/mezzanine,dovydas/mezzanine,theclanks/mezzanine,webounty/mezzanine,biomassives/mezzanine
|
"""
Provides models and utilities for displaying different types of Twitter feeds.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine import __version__
# Constants/choices for the different query types.
QUERY_TYPE_USER = "user"
QUERY_TYPE_LIST = "list"
QUERY_TYPE_SEARCH = "search"
QUERY_TYPE_CHOICES = (
(QUERY_TYPE_USER, _("User")),
(QUERY_TYPE_LIST, _("List")),
(QUERY_TYPE_SEARCH, _("Search")),
)
def get_auth_settings():
"""
Returns all the key/secret settings for Twitter access,
only if they're all defined.
"""
from mezzanine.conf import settings
settings.use_editable()
auth_settings = (settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN_KEY,
settings.TWITTER_ACCESS_TOKEN_SECRET)
return auth_settings if all(auth_settings) else None
Fix error raised when twitter lib is installed, but mezzanine.twitter is removed from INSTALLED_APPS.
|
"""
Provides models and utilities for displaying different types of Twitter feeds.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine import __version__
# Constants/choices for the different query types.
QUERY_TYPE_USER = "user"
QUERY_TYPE_LIST = "list"
QUERY_TYPE_SEARCH = "search"
QUERY_TYPE_CHOICES = (
(QUERY_TYPE_USER, _("User")),
(QUERY_TYPE_LIST, _("List")),
(QUERY_TYPE_SEARCH, _("Search")),
)
def get_auth_settings():
"""
Returns all the key/secret settings for Twitter access,
only if they're all defined.
"""
from mezzanine.conf import settings
settings.use_editable()
try:
auth_settings = (settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN_KEY,
settings.TWITTER_ACCESS_TOKEN_SECRET)
except AttributeError:
return None
else:
return auth_settings if all(auth_settings) else None
|
<commit_before>"""
Provides models and utilities for displaying different types of Twitter feeds.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine import __version__
# Constants/choices for the different query types.
QUERY_TYPE_USER = "user"
QUERY_TYPE_LIST = "list"
QUERY_TYPE_SEARCH = "search"
QUERY_TYPE_CHOICES = (
(QUERY_TYPE_USER, _("User")),
(QUERY_TYPE_LIST, _("List")),
(QUERY_TYPE_SEARCH, _("Search")),
)
def get_auth_settings():
"""
Returns all the key/secret settings for Twitter access,
only if they're all defined.
"""
from mezzanine.conf import settings
settings.use_editable()
auth_settings = (settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN_KEY,
settings.TWITTER_ACCESS_TOKEN_SECRET)
return auth_settings if all(auth_settings) else None
<commit_msg>Fix error raised when twitter lib is installed, but mezzanine.twitter is removed from INSTALLED_APPS.<commit_after>
|
"""
Provides models and utilities for displaying different types of Twitter feeds.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine import __version__
# Constants/choices for the different query types.
QUERY_TYPE_USER = "user"
QUERY_TYPE_LIST = "list"
QUERY_TYPE_SEARCH = "search"
QUERY_TYPE_CHOICES = (
(QUERY_TYPE_USER, _("User")),
(QUERY_TYPE_LIST, _("List")),
(QUERY_TYPE_SEARCH, _("Search")),
)
def get_auth_settings():
"""
Returns all the key/secret settings for Twitter access,
only if they're all defined.
"""
from mezzanine.conf import settings
settings.use_editable()
try:
auth_settings = (settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN_KEY,
settings.TWITTER_ACCESS_TOKEN_SECRET)
except AttributeError:
return None
else:
return auth_settings if all(auth_settings) else None
|
"""
Provides models and utilities for displaying different types of Twitter feeds.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine import __version__
# Constants/choices for the different query types.
QUERY_TYPE_USER = "user"
QUERY_TYPE_LIST = "list"
QUERY_TYPE_SEARCH = "search"
QUERY_TYPE_CHOICES = (
(QUERY_TYPE_USER, _("User")),
(QUERY_TYPE_LIST, _("List")),
(QUERY_TYPE_SEARCH, _("Search")),
)
def get_auth_settings():
"""
Returns all the key/secret settings for Twitter access,
only if they're all defined.
"""
from mezzanine.conf import settings
settings.use_editable()
auth_settings = (settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN_KEY,
settings.TWITTER_ACCESS_TOKEN_SECRET)
return auth_settings if all(auth_settings) else None
Fix error raised when twitter lib is installed, but mezzanine.twitter is removed from INSTALLED_APPS."""
Provides models and utilities for displaying different types of Twitter feeds.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine import __version__
# Constants/choices for the different query types.
QUERY_TYPE_USER = "user"
QUERY_TYPE_LIST = "list"
QUERY_TYPE_SEARCH = "search"
QUERY_TYPE_CHOICES = (
(QUERY_TYPE_USER, _("User")),
(QUERY_TYPE_LIST, _("List")),
(QUERY_TYPE_SEARCH, _("Search")),
)
def get_auth_settings():
"""
Returns all the key/secret settings for Twitter access,
only if they're all defined.
"""
from mezzanine.conf import settings
settings.use_editable()
try:
auth_settings = (settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN_KEY,
settings.TWITTER_ACCESS_TOKEN_SECRET)
except AttributeError:
return None
else:
return auth_settings if all(auth_settings) else None
|
<commit_before>"""
Provides models and utilities for displaying different types of Twitter feeds.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine import __version__
# Constants/choices for the different query types.
QUERY_TYPE_USER = "user"
QUERY_TYPE_LIST = "list"
QUERY_TYPE_SEARCH = "search"
QUERY_TYPE_CHOICES = (
(QUERY_TYPE_USER, _("User")),
(QUERY_TYPE_LIST, _("List")),
(QUERY_TYPE_SEARCH, _("Search")),
)
def get_auth_settings():
"""
Returns all the key/secret settings for Twitter access,
only if they're all defined.
"""
from mezzanine.conf import settings
settings.use_editable()
auth_settings = (settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN_KEY,
settings.TWITTER_ACCESS_TOKEN_SECRET)
return auth_settings if all(auth_settings) else None
<commit_msg>Fix error raised when twitter lib is installed, but mezzanine.twitter is removed from INSTALLED_APPS.<commit_after>"""
Provides models and utilities for displaying different types of Twitter feeds.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from mezzanine import __version__
# Constants/choices for the different query types.
QUERY_TYPE_USER = "user"
QUERY_TYPE_LIST = "list"
QUERY_TYPE_SEARCH = "search"
QUERY_TYPE_CHOICES = (
(QUERY_TYPE_USER, _("User")),
(QUERY_TYPE_LIST, _("List")),
(QUERY_TYPE_SEARCH, _("Search")),
)
def get_auth_settings():
"""
Returns all the key/secret settings for Twitter access,
only if they're all defined.
"""
from mezzanine.conf import settings
settings.use_editable()
try:
auth_settings = (settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET,
settings.TWITTER_ACCESS_TOKEN_KEY,
settings.TWITTER_ACCESS_TOKEN_SECRET)
except AttributeError:
return None
else:
return auth_settings if all(auth_settings) else None
|
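A short usage sketch of the helper above, for context only; the import path and the way the four values are consumed are assumptions and not part of the record:

# Hedged sketch: how a caller might consume get_auth_settings(); import path assumed.
from mezzanine.twitter.models import get_auth_settings

def twitter_credentials():
    auth_settings = get_auth_settings()
    if auth_settings is None:
        # Either a key/secret setting is missing or mezzanine.twitter is not installed.
        return None
    consumer_key, consumer_secret, token_key, token_secret = auth_settings
    return consumer_key, consumer_secret, token_key, token_secret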
72b17165cbe2d9a46d2c66abf4919321f02c07c6
|
docs/conf.py
|
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing
# directory.
import sys, os
sys.path.append(os.path.join(os.path.abspath('.'), '_ext'))
sys.path.append(os.path.dirname(os.path.abspath('.')))
import analytical
# -- General configuration -----------------------------------------------------
project = u'django-analytical'
copyright = u'2011, Joost Cassee <joost@cassee.net>'
release = analytical.__version__
# The short X.Y version.
version = release.rsplit('.', 1)[0]
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'local']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
add_function_parentheses = True
pygments_style = 'sphinx'
intersphinx_mapping = {
'http://docs.python.org/2.7': None,
'http://docs.djangoproject.com/en/1.7': 'http://docs.djangoproject.com/en/1.7/_objects/',
}
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
htmlhelp_basename = 'analyticaldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'django-analytical.tex', u'Documentation for django-analytical',
u'Joost Cassee', 'manual'),
]
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing
# directory.
import sys, os
sys.path.append(os.path.join(os.path.abspath('.'), '_ext'))
sys.path.append(os.path.dirname(os.path.abspath('.')))
import analytical
# -- General configuration -----------------------------------------------------
project = u'django-analytical'
copyright = u'2011, Joost Cassee <joost@cassee.net>'
release = analytical.__version__
# The short X.Y version.
version = release.rsplit('.', 1)[0]
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'local']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
add_function_parentheses = True
pygments_style = 'sphinx'
intersphinx_mapping = {
'http://docs.python.org/2.7': None,
'http://docs.djangoproject.com/en/1.8': 'http://docs.djangoproject.com/en/1.8/_objects/',
}
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
htmlhelp_basename = 'analyticaldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'django-analytical.tex', u'Documentation for django-analytical',
u'Joost Cassee', 'manual'),
]
|
Update intersphinx mapping to Django 1.8
|
Update intersphinx mapping to Django 1.8
|
Python
|
mit
|
bittner/django-analytical,machtfit/django-analytical,apocquet/django-analytical,jcassee/django-analytical,ericdwang/django-analytical,pjdelport/django-analytical,ChristosChristofidis/django-analytical
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing
# directory.
import sys, os
sys.path.append(os.path.join(os.path.abspath('.'), '_ext'))
sys.path.append(os.path.dirname(os.path.abspath('.')))
import analytical
# -- General configuration -----------------------------------------------------
project = u'django-analytical'
copyright = u'2011, Joost Cassee <joost@cassee.net>'
release = analytical.__version__
# The short X.Y version.
version = release.rsplit('.', 1)[0]
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'local']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
add_function_parentheses = True
pygments_style = 'sphinx'
intersphinx_mapping = {
'http://docs.python.org/2.7': None,
'http://docs.djangoproject.com/en/1.7': 'http://docs.djangoproject.com/en/1.7/_objects/',
}
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
htmlhelp_basename = 'analyticaldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'django-analytical.tex', u'Documentation for django-analytical',
u'Joost Cassee', 'manual'),
]
Update intersphinx mapping to Django 1.8
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing
# directory.
import sys, os
sys.path.append(os.path.join(os.path.abspath('.'), '_ext'))
sys.path.append(os.path.dirname(os.path.abspath('.')))
import analytical
# -- General configuration -----------------------------------------------------
project = u'django-analytical'
copyright = u'2011, Joost Cassee <joost@cassee.net>'
release = analytical.__version__
# The short X.Y version.
version = release.rsplit('.', 1)[0]
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'local']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
add_function_parentheses = True
pygments_style = 'sphinx'
intersphinx_mapping = {
'http://docs.python.org/2.7': None,
'http://docs.djangoproject.com/en/1.8': 'http://docs.djangoproject.com/en/1.8/_objects/',
}
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
htmlhelp_basename = 'analyticaldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'django-analytical.tex', u'Documentation for django-analytical',
u'Joost Cassee', 'manual'),
]
|
<commit_before># -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing
# directory.
import sys, os
sys.path.append(os.path.join(os.path.abspath('.'), '_ext'))
sys.path.append(os.path.dirname(os.path.abspath('.')))
import analytical
# -- General configuration -----------------------------------------------------
project = u'django-analytical'
copyright = u'2011, Joost Cassee <joost@cassee.net>'
release = analytical.__version__
# The short X.Y version.
version = release.rsplit('.', 1)[0]
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'local']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
add_function_parentheses = True
pygments_style = 'sphinx'
intersphinx_mapping = {
'http://docs.python.org/2.7': None,
'http://docs.djangoproject.com/en/1.7': 'http://docs.djangoproject.com/en/1.7/_objects/',
}
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
htmlhelp_basename = 'analyticaldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'django-analytical.tex', u'Documentation for django-analytical',
u'Joost Cassee', 'manual'),
]
<commit_msg>Update intersphinx mapping to Django 1.8<commit_after>
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing
# directory.
import sys, os
sys.path.append(os.path.join(os.path.abspath('.'), '_ext'))
sys.path.append(os.path.dirname(os.path.abspath('.')))
import analytical
# -- General configuration -----------------------------------------------------
project = u'django-analytical'
copyright = u'2011, Joost Cassee <joost@cassee.net>'
release = analytical.__version__
# The short X.Y version.
version = release.rsplit('.', 1)[0]
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'local']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
add_function_parentheses = True
pygments_style = 'sphinx'
intersphinx_mapping = {
'http://docs.python.org/2.7': None,
'http://docs.djangoproject.com/en/1.8': 'http://docs.djangoproject.com/en/1.8/_objects/',
}
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
htmlhelp_basename = 'analyticaldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'django-analytical.tex', u'Documentation for django-analytical',
u'Joost Cassee', 'manual'),
]
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing
# directory.
import sys, os
sys.path.append(os.path.join(os.path.abspath('.'), '_ext'))
sys.path.append(os.path.dirname(os.path.abspath('.')))
import analytical
# -- General configuration -----------------------------------------------------
project = u'django-analytical'
copyright = u'2011, Joost Cassee <joost@cassee.net>'
release = analytical.__version__
# The short X.Y version.
version = release.rsplit('.', 1)[0]
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'local']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
add_function_parentheses = True
pygments_style = 'sphinx'
intersphinx_mapping = {
'http://docs.python.org/2.7': None,
'http://docs.djangoproject.com/en/1.7': 'http://docs.djangoproject.com/en/1.7/_objects/',
}
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
htmlhelp_basename = 'analyticaldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'django-analytical.tex', u'Documentation for django-analytical',
u'Joost Cassee', 'manual'),
]
Update intersphinx mapping to Django 1.8# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing
# directory.
import sys, os
sys.path.append(os.path.join(os.path.abspath('.'), '_ext'))
sys.path.append(os.path.dirname(os.path.abspath('.')))
import analytical
# -- General configuration -----------------------------------------------------
project = u'django-analytical'
copyright = u'2011, Joost Cassee <joost@cassee.net>'
release = analytical.__version__
# The short X.Y version.
version = release.rsplit('.', 1)[0]
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'local']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
add_function_parentheses = True
pygments_style = 'sphinx'
intersphinx_mapping = {
'http://docs.python.org/2.7': None,
'http://docs.djangoproject.com/en/1.8': 'http://docs.djangoproject.com/en/1.8/_objects/',
}
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
htmlhelp_basename = 'analyticaldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'django-analytical.tex', u'Documentation for django-analytical',
u'Joost Cassee', 'manual'),
]
|
<commit_before># -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing
# directory.
import sys, os
sys.path.append(os.path.join(os.path.abspath('.'), '_ext'))
sys.path.append(os.path.dirname(os.path.abspath('.')))
import analytical
# -- General configuration -----------------------------------------------------
project = u'django-analytical'
copyright = u'2011, Joost Cassee <joost@cassee.net>'
release = analytical.__version__
# The short X.Y version.
version = release.rsplit('.', 1)[0]
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'local']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
add_function_parentheses = True
pygments_style = 'sphinx'
intersphinx_mapping = {
'http://docs.python.org/2.7': None,
'http://docs.djangoproject.com/en/1.7': 'http://docs.djangoproject.com/en/1.7/_objects/',
}
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
htmlhelp_basename = 'analyticaldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'django-analytical.tex', u'Documentation for django-analytical',
u'Joost Cassee', 'manual'),
]
<commit_msg>Update intersphinx mapping to Django 1.8<commit_after># -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing
# directory.
import sys, os
sys.path.append(os.path.join(os.path.abspath('.'), '_ext'))
sys.path.append(os.path.dirname(os.path.abspath('.')))
import analytical
# -- General configuration -----------------------------------------------------
project = u'django-analytical'
copyright = u'2011, Joost Cassee <joost@cassee.net>'
release = analytical.__version__
# The short X.Y version.
version = release.rsplit('.', 1)[0]
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'local']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
add_function_parentheses = True
pygments_style = 'sphinx'
intersphinx_mapping = {
'http://docs.python.org/2.7': None,
'http://docs.djangoproject.com/en/1.8': 'http://docs.djangoproject.com/en/1.8/_objects/',
}
# -- Options for HTML output ---------------------------------------------------
html_theme = 'default'
htmlhelp_basename = 'analyticaldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_documents = [
('index', 'django-analytical.tex', u'Documentation for django-analytical',
u'Joost Cassee', 'manual'),
]
|
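For illustration, the same two inventories written in the named-key style that later Sphinx releases document; the key names 'python' and 'django' are assumptions, not taken from the record:

# Hedged sketch of an equivalent named-key intersphinx mapping (not from the repository).
intersphinx_mapping = {
    'python': ('http://docs.python.org/2.7', None),
    'django': ('http://docs.djangoproject.com/en/1.8',
               'http://docs.djangoproject.com/en/1.8/_objects/'),
}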
761c5e18abca2ac5baf837d9da66cf2f5bb04c01
|
typhon/tests/files/test_utils.py
|
typhon/tests/files/test_utils.py
|
from tempfile import NamedTemporaryFile
from typhon.files import compress, decompress
class TestCompression:
data = "ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678910"
def create_file(self, filename):
with open(filename, "w") as file:
file.write(self.data)
def check_file(self, filename):
with open(filename) as file:
return self.data == file.readline()
def test_compress_decompress_zip(self):
with NamedTemporaryFile() as file:
with compress(file.name+".zip") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".zip") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_gzip(self):
with NamedTemporaryFile() as file:
with compress(file.name+".gz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".gz") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_bz2(self):
with NamedTemporaryFile() as file:
with compress(file.name+".bz2") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".bz2") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_lzma(self):
with NamedTemporaryFile() as file:
with compress(file.name+".xz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".xz") as uncompressed_file:
assert self.check_file(uncompressed_file)
|
Add tests for compression and decompression functions
|
Add tests for compression and decompression functions
|
Python
|
mit
|
atmtools/typhon,atmtools/typhon
|
Add tests for compression and decompression functions
|
from tempfile import NamedTemporaryFile
from typhon.files import compress, decompress
class TestCompression:
data = "ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678910"
def create_file(self, filename):
with open(filename, "w") as file:
file.write(self.data)
def check_file(self, filename):
with open(filename) as file:
return self.data == file.readline()
def test_compress_decompress_zip(self):
with NamedTemporaryFile() as file:
with compress(file.name+".zip") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".zip") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_gzip(self):
with NamedTemporaryFile() as file:
with compress(file.name+".gz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".gz") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_bz2(self):
with NamedTemporaryFile() as file:
with compress(file.name+".bz2") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".bz2") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_lzma(self):
with NamedTemporaryFile() as file:
with compress(file.name+".xz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".xz") as uncompressed_file:
assert self.check_file(uncompressed_file)
|
<commit_before><commit_msg>Add tests for compression and decompression functions<commit_after>
|
from tempfile import NamedTemporaryFile
from typhon.files import compress, decompress
class TestCompression:
data = "ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678910"
def create_file(self, filename):
with open(filename, "w") as file:
file.write(self.data)
def check_file(self, filename):
with open(filename) as file:
return self.data == file.readline()
def test_compress_decompress_zip(self):
with NamedTemporaryFile() as file:
with compress(file.name+".zip") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".zip") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_gzip(self):
with NamedTemporaryFile() as file:
with compress(file.name+".gz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".gz") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_bz2(self):
with NamedTemporaryFile() as file:
with compress(file.name+".bz2") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".bz2") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_lzma(self):
with NamedTemporaryFile() as file:
with compress(file.name+".xz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".xz") as uncompressed_file:
assert self.check_file(uncompressed_file)
|
Add tests for compression and decompression functionsfrom tempfile import NamedTemporaryFile
from typhon.files import compress, decompress
class TestCompression:
data = "ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678910"
def create_file(self, filename):
with open(filename, "w") as file:
file.write(self.data)
def check_file(self, filename):
with open(filename) as file:
return self.data == file.readline()
def test_compress_decompress_zip(self):
with NamedTemporaryFile() as file:
with compress(file.name+".zip") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".zip") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_gzip(self):
with NamedTemporaryFile() as file:
with compress(file.name+".gz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".gz") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_bz2(self):
with NamedTemporaryFile() as file:
with compress(file.name+".bz2") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".bz2") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_lzma(self):
with NamedTemporaryFile() as file:
with compress(file.name+".xz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".xz") as uncompressed_file:
assert self.check_file(uncompressed_file)
|
<commit_before><commit_msg>Add tests for compression and decompression functions<commit_after>from tempfile import NamedTemporaryFile
from typhon.files import compress, decompress
class TestCompression:
data = "ABCDEFGHIJKLMNOPQRSTUVWXYZ12345678910"
def create_file(self, filename):
with open(filename, "w") as file:
file.write(self.data)
def check_file(self, filename):
with open(filename) as file:
return self.data == file.readline()
def test_compress_decompress_zip(self):
with NamedTemporaryFile() as file:
with compress(file.name+".zip") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".zip") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_gzip(self):
with NamedTemporaryFile() as file:
with compress(file.name+".gz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".gz") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_bz2(self):
with NamedTemporaryFile() as file:
with compress(file.name+".bz2") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".bz2") as uncompressed_file:
assert self.check_file(uncompressed_file)
def test_compress_decompress_lzma(self):
with NamedTemporaryFile() as file:
with compress(file.name+".xz") as compressed_file:
self.create_file(compressed_file)
with decompress(file.name+".xz") as uncompressed_file:
assert self.check_file(uncompressed_file)
|
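Outside the test harness, the behaviour exercised above would look roughly like the sketch below; the temporary-path semantics are inferred from the tests and the file name is a placeholder:

# Hedged sketch of the compress/decompress pattern the tests above exercise.
from typhon.files import compress, decompress

with compress("example.txt.gz") as uncompressed_path:
    # compress() yields a plain file path to write to; the compressed file is
    # produced when the context manager exits (behaviour inferred from the tests).
    with open(uncompressed_path, "w") as fh:
        fh.write("hello typhon")

with decompress("example.txt.gz") as uncompressed_path:
    with open(uncompressed_path) as fh:
        print(fh.read())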
|
b3b340764c2f98e9e6393c9e259a7dd7b697167b
|
oslo/vmware/constants.py
|
oslo/vmware/constants.py
|
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared constants across the VMware ecosystem.
"""
# Datacenter path for HTTP access to datastores if the target server is an ESX/
# ESXi system: http://goo.gl/B5Htr8 for more information.
ESX_DATACENTER_PATH = 'ha-datacenter'
|
Add constant for ESX datacenter path (HTTP access)
|
Add constant for ESX datacenter path (HTTP access)
This patch adds a constant.py file to store the constants needed in
the VMware ecosystem. A new constant is added for the ESX datacenter
path when using http access to datastores.
Change-Id: Ie5b84b3cc3913ab57f7ab487349557781cc4157a
|
Python
|
apache-2.0
|
openstack/oslo.vmware
|
Add constant for ESX datacenter path (HTTP access)
This patch adds a constant.py file to store the constants needed in
the VMware ecosystem. A new constant is added for the ESX datacenter
path when using http access to datastores.
Change-Id: Ie5b84b3cc3913ab57f7ab487349557781cc4157a
|
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared constants across the VMware ecosystem.
"""
# Datacenter path for HTTP access to datastores if the target server is an ESX/
# ESXi system: http://goo.gl/B5Htr8 for more information.
ESX_DATACENTER_PATH = 'ha-datacenter'
|
<commit_before><commit_msg>Add constant for ESX datacenter path (HTTP access)
This patch adds a constant.py file to store the constants needed in
the VMware ecosystem. A new constant is added for the ESX datacenter
path when using http access to datastores.
Change-Id: Ie5b84b3cc3913ab57f7ab487349557781cc4157a<commit_after>
|
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared constants across the VMware ecosystem.
"""
# Datacenter path for HTTP access to datastores if the target server is an ESX/
# ESXi system: http://goo.gl/B5Htr8 for more information.
ESX_DATACENTER_PATH = 'ha-datacenter'
|
Add constant for ESX datacenter path (HTTP access)
This patch adds a constant.py file to store the constants needed in
the VMware ecosystem. A new constant is added for the ESX datacenter
path when using http access to datastores.
Change-Id: Ie5b84b3cc3913ab57f7ab487349557781cc4157a# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared constants across the VMware ecosystem.
"""
# Datacenter path for HTTP access to datastores if the target server is an ESX/
# ESXi system: http://goo.gl/B5Htr8 for more information.
ESX_DATACENTER_PATH = 'ha-datacenter'
|
<commit_before><commit_msg>Add constant for ESX datacenter path (HTTP access)
This patch adds a constant.py file to store the constants needed in
the VMware ecosystem. A new constant is added for the ESX datacenter
path when using http access to datastores.
Change-Id: Ie5b84b3cc3913ab57f7ab487349557781cc4157a<commit_after># Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Shared constants across the VMware ecosystem.
"""
# Datacenter path for HTTP access to datastores if the target server is an ESX/
# ESXi system: http://goo.gl/B5Htr8 for more information.
ESX_DATACENTER_PATH = 'ha-datacenter'
|
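For context, one way a constant like this tends to be used when building datastore HTTP URLs for a standalone ESX/ESXi host; the helper and the query-string layout below are assumptions and not oslo.vmware API:

# Hedged sketch only; the URL layout is an assumption, not library code.
from oslo.vmware import constants

def esx_datastore_url(host, datastore, file_path):
    # Standalone ESX/ESXi hosts expose datastore files under the fixed
    # 'ha-datacenter' datacenter path rather than a named datacenter.
    return ("https://%s/folder/%s?dcPath=%s&dsName=%s"
            % (host, file_path, constants.ESX_DATACENTER_PATH, datastore))

print(esx_datastore_url("esx.example.com", "datastore1", "vm1/vm1.vmdk"))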
|
69ad33e03263a7bcb4323460302e2716c34891e3
|
st2common/tests/unit/test_logging_middleware.py
|
st2common/tests/unit/test_logging_middleware.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest2
from st2common.middleware.logging import LoggingMiddleware
from st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE
__all__ = [
'LoggingMiddlewareTestCase'
]
class LoggingMiddlewareTestCase(unittest2.TestCase):
@mock.patch('st2common.middleware.logging.LOG')
@mock.patch('st2common.middleware.logging.Request')
def test_secret_parameters_are_masked_in_log_message(self, mock_request, mock_log):
def app(environ, custom_start_response):
custom_start_response(status='200 OK', headers=[('Content-Length', 100)])
return [None]
router = mock.Mock()
endpoint = mock.Mock()
router.match.return_value = (endpoint, None)
middleware = LoggingMiddleware(app=app, router=router)
environ = {}
mock_request.return_value.GET.dict_of_lists.return_value = {
'foo': 'bar',
'bar': 'baz',
'x-auth-token': 'secret',
'st2-api-key': 'secret',
'password': 'secret',
'st2_auth_token': 'secret',
'token': 'secret'
}
middleware(environ=environ, start_response=mock.Mock())
expected_query = {
'foo': 'bar',
'bar': 'baz',
'x-auth-token': MASKED_ATTRIBUTE_VALUE,
'st2-api-key': MASKED_ATTRIBUTE_VALUE,
'password': MASKED_ATTRIBUTE_VALUE,
'token': MASKED_ATTRIBUTE_VALUE,
'st2_auth_token': MASKED_ATTRIBUTE_VALUE
}
call_kwargs = mock_log.info.call_args_list[0][1]
query = call_kwargs['extra']['query']
self.assertEqual(query, expected_query)
|
Add a test case for masking secret values in API log messages.
|
Add a test case for masking secret values in API log messages.
|
Python
|
apache-2.0
|
nzlosh/st2,Plexxi/st2,nzlosh/st2,nzlosh/st2,StackStorm/st2,Plexxi/st2,Plexxi/st2,StackStorm/st2,StackStorm/st2,nzlosh/st2,StackStorm/st2,Plexxi/st2
|
Add a test case for masking secret values in API log messages.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest2
from st2common.middleware.logging import LoggingMiddleware
from st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE
__all__ = [
'LoggingMiddlewareTestCase'
]
class LoggingMiddlewareTestCase(unittest2.TestCase):
@mock.patch('st2common.middleware.logging.LOG')
@mock.patch('st2common.middleware.logging.Request')
def test_secret_parameters_are_masked_in_log_message(self, mock_request, mock_log):
def app(environ, custom_start_response):
custom_start_response(status='200 OK', headers=[('Content-Length', 100)])
return [None]
router = mock.Mock()
endpoint = mock.Mock()
router.match.return_value = (endpoint, None)
middleware = LoggingMiddleware(app=app, router=router)
environ = {}
mock_request.return_value.GET.dict_of_lists.return_value = {
'foo': 'bar',
'bar': 'baz',
'x-auth-token': 'secret',
'st2-api-key': 'secret',
'password': 'secret',
'st2_auth_token': 'secret',
'token': 'secret'
}
middleware(environ=environ, start_response=mock.Mock())
expected_query = {
'foo': 'bar',
'bar': 'baz',
'x-auth-token': MASKED_ATTRIBUTE_VALUE,
'st2-api-key': MASKED_ATTRIBUTE_VALUE,
'password': MASKED_ATTRIBUTE_VALUE,
'token': MASKED_ATTRIBUTE_VALUE,
'st2_auth_token': MASKED_ATTRIBUTE_VALUE
}
call_kwargs = mock_log.info.call_args_list[0][1]
query = call_kwargs['extra']['query']
self.assertEqual(query, expected_query)
|
<commit_before><commit_msg>Add a test case for masking secret values in API log messages.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest2
from st2common.middleware.logging import LoggingMiddleware
from st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE
__all__ = [
'LoggingMiddlewareTestCase'
]
class LoggingMiddlewareTestCase(unittest2.TestCase):
@mock.patch('st2common.middleware.logging.LOG')
@mock.patch('st2common.middleware.logging.Request')
def test_secret_parameters_are_masked_in_log_message(self, mock_request, mock_log):
def app(environ, custom_start_response):
custom_start_response(status='200 OK', headers=[('Content-Length', 100)])
return [None]
router = mock.Mock()
endpoint = mock.Mock()
router.match.return_value = (endpoint, None)
middleware = LoggingMiddleware(app=app, router=router)
environ = {}
mock_request.return_value.GET.dict_of_lists.return_value = {
'foo': 'bar',
'bar': 'baz',
'x-auth-token': 'secret',
'st2-api-key': 'secret',
'password': 'secret',
'st2_auth_token': 'secret',
'token': 'secret'
}
middleware(environ=environ, start_response=mock.Mock())
expected_query = {
'foo': 'bar',
'bar': 'baz',
'x-auth-token': MASKED_ATTRIBUTE_VALUE,
'st2-api-key': MASKED_ATTRIBUTE_VALUE,
'password': MASKED_ATTRIBUTE_VALUE,
'token': MASKED_ATTRIBUTE_VALUE,
'st2_auth_token': MASKED_ATTRIBUTE_VALUE
}
call_kwargs = mock_log.info.call_args_list[0][1]
query = call_kwargs['extra']['query']
self.assertEqual(query, expected_query)
|
Add a test case for masking secret values in API log messages.# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest2
from st2common.middleware.logging import LoggingMiddleware
from st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE
__all__ = [
'LoggingMiddlewareTestCase'
]
class LoggingMiddlewareTestCase(unittest2.TestCase):
@mock.patch('st2common.middleware.logging.LOG')
@mock.patch('st2common.middleware.logging.Request')
def test_secret_parameters_are_masked_in_log_message(self, mock_request, mock_log):
def app(environ, custom_start_response):
custom_start_response(status='200 OK', headers=[('Content-Length', 100)])
return [None]
router = mock.Mock()
endpoint = mock.Mock()
router.match.return_value = (endpoint, None)
middleware = LoggingMiddleware(app=app, router=router)
environ = {}
mock_request.return_value.GET.dict_of_lists.return_value = {
'foo': 'bar',
'bar': 'baz',
'x-auth-token': 'secret',
'st2-api-key': 'secret',
'password': 'secret',
'st2_auth_token': 'secret',
'token': 'secret'
}
middleware(environ=environ, start_response=mock.Mock())
expected_query = {
'foo': 'bar',
'bar': 'baz',
'x-auth-token': MASKED_ATTRIBUTE_VALUE,
'st2-api-key': MASKED_ATTRIBUTE_VALUE,
'password': MASKED_ATTRIBUTE_VALUE,
'token': MASKED_ATTRIBUTE_VALUE,
'st2_auth_token': MASKED_ATTRIBUTE_VALUE
}
call_kwargs = mock_log.info.call_args_list[0][1]
query = call_kwargs['extra']['query']
self.assertEqual(query, expected_query)
|
<commit_before><commit_msg>Add a test case for masking secret values in API log messages.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest2
from st2common.middleware.logging import LoggingMiddleware
from st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE
__all__ = [
'LoggingMiddlewareTestCase'
]
class LoggingMiddlewareTestCase(unittest2.TestCase):
@mock.patch('st2common.middleware.logging.LOG')
@mock.patch('st2common.middleware.logging.Request')
def test_secret_parameters_are_masked_in_log_message(self, mock_request, mock_log):
def app(environ, custom_start_response):
custom_start_response(status='200 OK', headers=[('Content-Length', 100)])
return [None]
router = mock.Mock()
endpoint = mock.Mock()
router.match.return_value = (endpoint, None)
middleware = LoggingMiddleware(app=app, router=router)
environ = {}
mock_request.return_value.GET.dict_of_lists.return_value = {
'foo': 'bar',
'bar': 'baz',
'x-auth-token': 'secret',
'st2-api-key': 'secret',
'password': 'secret',
'st2_auth_token': 'secret',
'token': 'secret'
}
middleware(environ=environ, start_response=mock.Mock())
expected_query = {
'foo': 'bar',
'bar': 'baz',
'x-auth-token': MASKED_ATTRIBUTE_VALUE,
'st2-api-key': MASKED_ATTRIBUTE_VALUE,
'password': MASKED_ATTRIBUTE_VALUE,
'token': MASKED_ATTRIBUTE_VALUE,
'st2_auth_token': MASKED_ATTRIBUTE_VALUE
}
call_kwargs = mock_log.info.call_args_list[0][1]
query = call_kwargs['extra']['query']
self.assertEqual(query, expected_query)
|
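A hedged sketch of the masking step the test asserts; the helper name and the parameter list are assumptions, only MASKED_ATTRIBUTE_VALUE comes from the record:

# Hedged sketch of query-parameter masking; not the actual st2 middleware code.
from st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE

SECRET_QUERY_PARAMETERS = ['x-auth-token', 'st2-api-key', 'password',
                           'token', 'st2_auth_token']  # assumed list

def mask_secret_parameters(query):
    # Replace values of known secret parameters before they are logged.
    return {key: MASKED_ATTRIBUTE_VALUE if key in SECRET_QUERY_PARAMETERS else value
            for key, value in query.items()}

print(mask_secret_parameters({'foo': 'bar', 'password': 'secret'}))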
|
c5dd440383935fb4996ca08b76928ed8d84d0fb9
|
events/templatetags/humantime.py
|
events/templatetags/humantime.py
|
# -*- encoding:utf-8 -*-
# Template tag
from django.template.defaultfilters import stringfilter
from datetime import datetime, timedelta
from django import template
register = template.Library()
@register.filter
def event_time(start, end):
today = datetime.today ()
result = ""
if start == today:
result += "aujourd'hui "
else:
result += "le %s " % start.strftime ("%A %d %B %Y")
if start.day == end.day and start.month == end.month and start.year == end.year:
result += "de %s " % start.strftime ("%H:%M")
result += "à %s " % end.strftime ("%H:%M")
else:
result += "à %s" % start.strftime ("%H:%M")
result += "jusqu'au %s" % end.strftime ("%A %d %B %Y à %H:%M")
return result
|
Add a template tag filter to properly format the time of events
|
Add a template tag filter to properly format the time of events
|
Python
|
agpl-3.0
|
vcorreze/agendaEteAccoord,vcorreze/agendaEteAccoord,mlhamel/agendadulibre,mlhamel/agendadulibre,mlhamel/agendadulibre,vcorreze/agendaEteAccoord
|
Add a template tag filter to properly format the time of events
|
# -*- encoding:utf-8 -*-
# Template tag
from django.template.defaultfilters import stringfilter
from datetime import datetime, timedelta
from django import template
register = template.Library()
@register.filter
def event_time(start, end):
today = datetime.today ()
result = ""
if start == today:
result += "aujourd'hui "
else:
result += "le %s " % start.strftime ("%A %d %B %Y")
if start.day == end.day and start.month == end.month and start.year == end.year:
result += "de %s " % start.strftime ("%H:%M")
result += "à %s " % end.strftime ("%H:%M")
else:
result += "à %s" % start.strftime ("%H:%M")
result += "jusqu'au %s" % end.strftime ("%A %d %B %Y à %H:%M")
return result
|
<commit_before><commit_msg>Add a template tag filter to properly format the time of events<commit_after>
|
# -*- encoding:utf-8 -*-
# Template tag
from django.template.defaultfilters import stringfilter
from datetime import datetime, timedelta
from django import template
register = template.Library()
@register.filter
def event_time(start, end):
today = datetime.today ()
result = ""
if start == today:
result += "aujourd'hui "
else:
result += "le %s " % start.strftime ("%A %d %B %Y")
if start.day == end.day and start.month == end.month and start.year == end.year:
result += "de %s " % start.strftime ("%H:%M")
result += "à %s " % end.strftime ("%H:%M")
else:
result += "à %s" % start.strftime ("%H:%M")
result += "jusqu'au %s" % end.strftime ("%A %d %B %Y à %H:%M")
return result
|
Add a template tag filter to properly format the time of events# -*- encoding:utf-8 -*-
# Template tag
from django.template.defaultfilters import stringfilter
from datetime import datetime, timedelta
from django import template
register = template.Library()
@register.filter
def event_time(start, end):
today = datetime.today ()
result = ""
if start == today:
result += "aujourd'hui "
else:
result += "le %s " % start.strftime ("%A %d %B %Y")
if start.day == end.day and start.month == end.month and start.year == end.year:
result += "de %s " % start.strftime ("%H:%M")
result += "à %s " % end.strftime ("%H:%M")
else:
result += "à %s" % start.strftime ("%H:%M")
result += "jusqu'au %s" % end.strftime ("%A %d %B %Y à %H:%M")
return result
|
<commit_before><commit_msg>Add a template tag filter to properly format the time of events<commit_after># -*- encoding:utf-8 -*-
# Template tag
from django.template.defaultfilters import stringfilter
from datetime import datetime, timedelta
from django import template
register = template.Library()
@register.filter
def event_time(start, end):
today = datetime.today ()
result = ""
if start == today:
result += "aujourd'hui "
else:
result += "le %s " % start.strftime ("%A %d %B %Y")
if start.day == end.day and start.month == end.month and start.year == end.year:
result += "de %s " % start.strftime ("%H:%M")
result += "à %s " % end.strftime ("%H:%M")
else:
result += "à %s" % start.strftime ("%H:%M")
result += "jusqu'au %s" % end.strftime ("%A %d %B %Y à %H:%M")
return result
|
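As a usage sketch, the filter takes the start datetime as its value and the end datetime as its argument (in a template, roughly {{ event.start|event_time:event.end }}); the direct call below only uses names visible in the record, and the exact wording of the output depends on the active locale:

# Hedged usage sketch; module path taken from the record's file location.
from datetime import datetime
from events.templatetags.humantime import event_time

start = datetime(2016, 7, 1, 18, 30)
end = datetime(2016, 7, 1, 20, 0)
print(event_time(start, end))  # e.g. "le ... 01 ... 2016 de 18:30 à 20:00 "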
|
fa3c5c4c80bcf8596013df7636ed7a1e19972c99
|
polyfit_distributions.py
|
polyfit_distributions.py
|
import numpy as np
def main():
np.random.seed(0)
bins = 50
X = np.random.zipf(1.2, 1000)
y = np.histogram(X[X<bins], bins, normed=True)[0]
fn = np.polyfit(np.arange(bins), y, 3)
print(fn)
np.random.seed(0)
bins = 50
samples = 1000
X = [np.random.zipf(1.2, samples),
np.random.zipf(1.3, samples),
np.random.zipf(1.5, samples)]
y = np.array([np.histogram(x[x<b], bins, normed=True)[0] for x in X])
fn = np.polyfit(np.arange(bins), y.T, 3)
print(fn)
if __name__ == '__main__':
main()
|
Build curves for a single zipfian distribution and then 3 combined
|
Build curves for a single zipfian distribution and then 3 combined
|
Python
|
mit
|
noelevans/sandpit,noelevans/sandpit,noelevans/sandpit,noelevans/sandpit,noelevans/sandpit,noelevans/sandpit
|
Build curves for a single zipfian distribution and then 3 combined
|
import numpy as np
def main():
np.random.seed(0)
bins = 50
X = np.random.zipf(1.2, 1000)
y = np.histogram(X[X<bins], bins, normed=True)[0]
fn = np.polyfit(np.arange(bins), y, 3)
print(fn)
np.random.seed(0)
bins = 50
samples = 1000
X = [np.random.zipf(1.2, samples),
np.random.zipf(1.3, samples),
np.random.zipf(1.5, samples)]
y = np.array([np.histogram(x[x<b], bins, normed=True)[0] for x in X])
fn = np.polyfit(np.arange(bins), y.T, 3)
print(fn)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Build curves for a single zipfian distribution and then 3 combined<commit_after>
|
import numpy as np
def main():
np.random.seed(0)
bins = 50
X = np.random.zipf(1.2, 1000)
y = np.histogram(X[X<bins], bins, normed=True)[0]
fn = np.polyfit(np.arange(bins), y, 3)
print(fn)
np.random.seed(0)
bins = 50
samples = 1000
X = [np.random.zipf(1.2, samples),
np.random.zipf(1.3, samples),
np.random.zipf(1.5, samples)]
y = np.array([np.histogram(x[x<b], bins, normed=True)[0] for x in X])
fn = np.polyfit(np.arange(bins), y.T, 3)
print(fn)
if __name__ == '__main__':
main()
|
Build curves for a single zipfian distribution and then 3 combinedimport numpy as np
def main():
np.random.seed(0)
bins = 50
X = np.random.zipf(1.2, 1000)
y = np.histogram(X[X<bins], bins, normed=True)[0]
fn = np.polyfit(np.arange(bins), y, 3)
print(fn)
np.random.seed(0)
bins = 50
samples = 1000
X = [np.random.zipf(1.2, samples),
np.random.zipf(1.3, samples),
np.random.zipf(1.5, samples)]
y = np.array([np.histogram(x[x<b], bins, normed=True)[0] for x in X])
fn = np.polyfit(np.arange(bins), y.T, 3)
print(fn)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Build curves for a single zipfian distribution and then 3 combined<commit_after>import numpy as np
def main():
np.random.seed(0)
bins = 50
X = np.random.zipf(1.2, 1000)
y = np.histogram(X[X<bins], bins, normed=True)[0]
fn = np.polyfit(np.arange(bins), y, 3)
print(fn)
np.random.seed(0)
bins = 50
samples = 1000
X = [np.random.zipf(1.2, samples),
np.random.zipf(1.3, samples),
np.random.zipf(1.5, samples)]
y = np.array([np.histogram(x[x<b], bins, normed=True)[0] for x in X])
fn = np.polyfit(np.arange(bins), y.T, 3)
print(fn)
if __name__ == '__main__':
main()
|
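For context, a short sketch of evaluating the batched fit that the script prints: np.polyfit with deg=3 on a (bins, 3) target returns a (4, 3) coefficient array, one cubic per distribution, and np.polyval takes one column at a time. The random coefficients below are only a stand-in for the printed output:

# Hedged sketch: evaluating one fitted cubic per distribution with np.polyval.
import numpy as np

bins = 50
coeffs = np.random.rand(4, 3)          # stand-in for the (4, 3) array printed by main()
x = np.arange(bins)
curves = np.array([np.polyval(coeffs[:, i], x) for i in range(coeffs.shape[1])])
print(curves.shape)                    # (3, 50): one evaluated curve per zipf sample set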
|
8c17a1eb43da7171da9085c2c6e92815460057f3
|
accelerator/migrations/0014_expert_profile_expert_category_alter_verbose_name.py
|
accelerator/migrations/0014_expert_profile_expert_category_alter_verbose_name.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-11-25 17:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0013_remove_mentorprogramofficehour_old_location'),
]
operations = [
migrations.AlterField(
model_name='expertprofile',
name='expert_category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experts', to=settings.ACCELERATOR_EXPERTCATEGORY_MODEL, verbose_name='I primarily consider myself a(n)'),
),
]
|
Add migration for altered field
|
[AC-7272] Add migration for altered field
|
Python
|
mit
|
masschallenge/django-accelerator,masschallenge/django-accelerator
|
[AC-7272] Add migration for altered field
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-11-25 17:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0013_remove_mentorprogramofficehour_old_location'),
]
operations = [
migrations.AlterField(
model_name='expertprofile',
name='expert_category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experts', to=settings.ACCELERATOR_EXPERTCATEGORY_MODEL, verbose_name='I primarily consider myself a(n)'),
),
]
|
<commit_before><commit_msg>[AC-7272] Add migration for altered field<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-11-25 17:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0013_remove_mentorprogramofficehour_old_location'),
]
operations = [
migrations.AlterField(
model_name='expertprofile',
name='expert_category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experts', to=settings.ACCELERATOR_EXPERTCATEGORY_MODEL, verbose_name='I primarily consider myself a(n)'),
),
]
|
[AC-7272] Add migration for altered field# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-11-25 17:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0013_remove_mentorprogramofficehour_old_location'),
]
operations = [
migrations.AlterField(
model_name='expertprofile',
name='expert_category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experts', to=settings.ACCELERATOR_EXPERTCATEGORY_MODEL, verbose_name='I primarily consider myself a(n)'),
),
]
|
<commit_before><commit_msg>[AC-7272] Add migration for altered field<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-11-25 17:04
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0013_remove_mentorprogramofficehour_old_location'),
]
operations = [
migrations.AlterField(
model_name='expertprofile',
name='expert_category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='experts', to=settings.ACCELERATOR_EXPERTCATEGORY_MODEL, verbose_name='I primarily consider myself a(n)'),
),
]
|
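For reference, a hedged reconstruction of the model declaration such an AlterField usually mirrors; the class body below is an assumption, and only the field options shown in the migration are grounded in the record:

# Hypothetical model-side view of the altered field; not code from the repository.
from django.conf import settings
from django.db import models

class ExpertProfile(models.Model):
    expert_category = models.ForeignKey(
        settings.ACCELERATOR_EXPERTCATEGORY_MODEL,
        on_delete=models.CASCADE,
        related_name='experts',
        verbose_name='I primarily consider myself a(n)',
    )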
|
f732ab01373e73fe8f707e88f8ba60f4610fc0d4
|
polling_stations/apps/data_collection/management/commands/import_denbighshire.py
|
polling_stations/apps/data_collection/management/commands/import_denbighshire.py
|
"""
Import Denbighshire
"""
from time import sleep
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseAddressCsvImporter
from data_finder.helpers import geocode
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Denbighshire
"""
council_id = 'W06000004'
addresses_name = 'PropertyPostCodePollingStationWebLookup-2016-02-09.CSV'
stations_name = 'PollingStations-2016-02-09.csv'
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
# format address
address = "\n".join([
record.pollingplaceaddress1,
record.pollingplaceaddress2,
record.pollingplaceaddress3,
record.pollingplaceaddress4,
record.pollingplaceaddress5,
record.pollingplaceaddress6,
])
while "\n\n" in address:
address = address.replace("\n\n", "\n")
"""
No grid references were supplied,
so attempt to derive a grid ref from postcode
Unfortunately some of these postcodes cover
quite large areas, so the postcode centroid may
be some distance from the polling station :(
"""
sleep(1.3) # ensure we don't hit mapit's usage limit
try:
gridref = geocode(record.pollingplaceaddress7)
location = Point(gridref['wgs84_lon'], gridref['wgs84_lat'], srid=4326)
except KeyError:
if record.pollingplaceaddress7 == 'LL21 8HA':
location = Point(-3.7330709, 52.9864346, srid=4326)
else:
location = None
return {
'internal_council_id': record.pollingplaceid,
'postcode' : record.pollingplaceaddress7,
'address' : address,
'location' : location
}
def address_record_to_dict(self, record):
if record.propertynumber == '0':
address = record.streetname
else:
address = '%s %s' % (record.propertynumber, record.streetname)
return {
'address' : address,
'postcode' : record.postcode,
'polling_station_id': record.pollingplaceid
}
|
Add import script for Denbighshire
|
Add import script for Denbighshire
|
Python
|
bsd-3-clause
|
chris48s/UK-Polling-Stations,andylolz/UK-Polling-Stations,chris48s/UK-Polling-Stations,andylolz/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,andylolz/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations
|
Add import script for Denbighshire
|
"""
Import Denbighshire
"""
from time import sleep
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseAddressCsvImporter
from data_finder.helpers import geocode
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Denbighshire
"""
council_id = 'W06000004'
addresses_name = 'PropertyPostCodePollingStationWebLookup-2016-02-09.CSV'
stations_name = 'PollingStations-2016-02-09.csv'
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
# format address
address = "\n".join([
record.pollingplaceaddress1,
record.pollingplaceaddress2,
record.pollingplaceaddress3,
record.pollingplaceaddress4,
record.pollingplaceaddress5,
record.pollingplaceaddress6,
])
while "\n\n" in address:
address = address.replace("\n\n", "\n")
"""
No grid references were supplied,
so attempt to derive a grid ref from postcode
Unfortunately some of these postcodes cover
quite large areas, so the postcode centroid may
be some distance from the polling station :(
"""
sleep(1.3) # ensure we don't hit mapit's usage limit
try:
gridref = geocode(record.pollingplaceaddress7)
location = Point(gridref['wgs84_lon'], gridref['wgs84_lat'], srid=4326)
except KeyError:
if record.pollingplaceaddress7 == 'LL21 8HA':
location = Point(-3.7330709, 52.9864346, srid=4326)
else:
location = None
return {
'internal_council_id': record.pollingplaceid,
'postcode' : record.pollingplaceaddress7,
'address' : address,
'location' : location
}
def address_record_to_dict(self, record):
if record.propertynumber == '0':
address = record.streetname
else:
address = '%s %s' % (record.propertynumber, record.streetname)
return {
'address' : address,
'postcode' : record.postcode,
'polling_station_id': record.pollingplaceid
}
|
<commit_before><commit_msg>Add import script for Denbighshire<commit_after>
|
"""
Import Denbighshire
"""
from time import sleep
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseAddressCsvImporter
from data_finder.helpers import geocode
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Denbighshire
"""
council_id = 'W06000004'
addresses_name = 'PropertyPostCodePollingStationWebLookup-2016-02-09.CSV'
stations_name = 'PollingStations-2016-02-09.csv'
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
# format address
address = "\n".join([
record.pollingplaceaddress1,
record.pollingplaceaddress2,
record.pollingplaceaddress3,
record.pollingplaceaddress4,
record.pollingplaceaddress5,
record.pollingplaceaddress6,
])
while "\n\n" in address:
address = address.replace("\n\n", "\n")
"""
No grid references were supplied,
so attempt to derive a grid ref from postcode
Unfortunately some of these postcodes cover
quite large areas, so the postcode centroid may
be some distance from the polling station :(
"""
sleep(1.3) # ensure we don't hit mapit's usage limit
try:
gridref = geocode(record.pollingplaceaddress7)
location = Point(gridref['wgs84_lon'], gridref['wgs84_lat'], srid=4326)
except KeyError:
if record.pollingplaceaddress7 == 'LL21 8HA':
location = Point(-3.7330709, 52.9864346, srid=4326)
else:
location = None
return {
'internal_council_id': record.pollingplaceid,
'postcode' : record.pollingplaceaddress7,
'address' : address,
'location' : location
}
def address_record_to_dict(self, record):
if record.propertynumber == '0':
address = record.streetname
else:
address = '%s %s' % (record.propertynumber, record.streetname)
return {
'address' : address,
'postcode' : record.postcode,
'polling_station_id': record.pollingplaceid
}
|
Add import script for Denbighshire"""
Import Denbighshire
"""
from time import sleep
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseAddressCsvImporter
from data_finder.helpers import geocode
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Denbighshire
"""
council_id = 'W06000004'
addresses_name = 'PropertyPostCodePollingStationWebLookup-2016-02-09.CSV'
stations_name = 'PollingStations-2016-02-09.csv'
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
# format address
address = "\n".join([
record.pollingplaceaddress1,
record.pollingplaceaddress2,
record.pollingplaceaddress3,
record.pollingplaceaddress4,
record.pollingplaceaddress5,
record.pollingplaceaddress6,
])
while "\n\n" in address:
address = address.replace("\n\n", "\n")
"""
No grid references were supplied,
so attempt to derive a grid ref from postcode
Unfortunately some of these postcodes cover
quite large areas, so the postcode centroid may
be some distance from the polling station :(
"""
sleep(1.3) # ensure we don't hit mapit's usage limit
try:
gridref = geocode(record.pollingplaceaddress7)
location = Point(gridref['wgs84_lon'], gridref['wgs84_lat'], srid=4326)
except KeyError:
if record.pollingplaceaddress7 == 'LL21 8HA':
location = Point(-3.7330709, 52.9864346, srid=4326)
else:
location = None
return {
'internal_council_id': record.pollingplaceid,
'postcode' : record.pollingplaceaddress7,
'address' : address,
'location' : location
}
def address_record_to_dict(self, record):
if record.propertynumber == '0':
address = record.streetname
else:
address = '%s %s' % (record.propertynumber, record.streetname)
return {
'address' : address,
'postcode' : record.postcode,
'polling_station_id': record.pollingplaceid
}
|
<commit_before><commit_msg>Add import script for Denbighshire<commit_after>"""
Import Denbighshire
"""
from time import sleep
from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseAddressCsvImporter
from data_finder.helpers import geocode
class Command(BaseAddressCsvImporter):
"""
Imports the Polling Station data from Denbighshire
"""
council_id = 'W06000004'
addresses_name = 'PropertyPostCodePollingStationWebLookup-2016-02-09.CSV'
stations_name = 'PollingStations-2016-02-09.csv'
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
# format address
address = "\n".join([
record.pollingplaceaddress1,
record.pollingplaceaddress2,
record.pollingplaceaddress3,
record.pollingplaceaddress4,
record.pollingplaceaddress5,
record.pollingplaceaddress6,
])
while "\n\n" in address:
address = address.replace("\n\n", "\n")
"""
No grid references were supplied,
so attempt to derive a grid ref from postcode
Unfortunately some of these postcodes cover
quite large areas, so the postcode centroid may
be some distance from the polling station :(
"""
sleep(1.3) # ensure we don't hit mapit's usage limit
try:
gridref = geocode(record.pollingplaceaddress7)
location = Point(gridref['wgs84_lon'], gridref['wgs84_lat'], srid=4326)
except KeyError:
if record.pollingplaceaddress7 == 'LL21 8HA':
location = Point(-3.7330709, 52.9864346, srid=4326)
else:
location = None
return {
'internal_council_id': record.pollingplaceid,
'postcode' : record.pollingplaceaddress7,
'address' : address,
'location' : location
}
def address_record_to_dict(self, record):
if record.propertynumber == '0':
address = record.streetname
else:
address = '%s %s' % (record.propertynumber, record.streetname)
return {
'address' : address,
'postcode' : record.postcode,
'polling_station_id': record.pollingplaceid
}
|
|
76ae560be419ac350d79db08772d6b7f5722754b
|
python/sparktestingbase/test/simple_streaming_test.py
|
python/sparktestingbase/test/simple_streaming_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple streaming test"""
from sparktestingbase.streamingtestcase import StreamingTestCase
import unittest2
class SimpleStreamingTest(StreamingTestCase):
"""A simple test."""
@classmethod
def tokenize(cls, f):
return f.flatMap(lambda line: line.split(" "))
def test_simple_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
expected = [["hi"], ["hi", "holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.tokenize, expected)
if __name__ == "__main__":
unittest2.main()
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple streaming test"""
from sparktestingbase.streamingtestcase import StreamingTestCase
import unittest2
class SimpleStreamingTest(StreamingTestCase):
"""A simple test."""
@classmethod
def tokenize(cls, f):
return f.flatMap(lambda line: line.split(" "))
@classmethod
def noop(cls, f):
return f.map(lambda x: x)
def test_simple_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
expected = [["hi"], ["hi", "holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.tokenize, expected)
def test_noop_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.noop, input)
if __name__ == "__main__":
unittest2.main()
|
Add a second trivial streaming test to make sure our re-using the spark context is ok
|
Add a second trivial streaming test to make sure our re-using the spark context is ok
|
Python
|
apache-2.0
|
holdenk/spark-testing-base,holdenk/spark-testing-base,ponkin/spark-testing-base,joychugh/spark-testing-base,MiguelPeralvo/spark-testing-base,snithish/spark-testing-base,samklr/spark-testing-base,ghl3/spark-testing-base,MiguelPeralvo/spark-testing-base,MiguelPeralvo/spark-testing-base,jnadler/spark-testing-base,eyeem/spark-testing-base,holdenk/spark-testing-base,ponkin/spark-testing-base,eyeem/spark-testing-base,mahmoudhanafy/spark-testing-base,snithish/spark-testing-base,ponkin/spark-testing-base,hellofresh/spark-testing-base,samklr/spark-testing-base,eyeem/spark-testing-base,jnadler/spark-testing-base,holdenk/spark-testing-base,MiguelPeralvo/spark-testing-base,ghl3/spark-testing-base,mahmoudhanafy/spark-testing-base,snithish/spark-testing-base,ponkin/spark-testing-base,mahmoudhanafy/spark-testing-base,snithish/spark-testing-base,ghl3/spark-testing-base,ghl3/spark-testing-base,hellofresh/spark-testing-base,jnadler/spark-testing-base,joychugh/spark-testing-base,mahmoudhanafy/spark-testing-base,samklr/spark-testing-base,joychugh/spark-testing-base,joychugh/spark-testing-base,jnadler/spark-testing-base
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple streaming test"""
from sparktestingbase.streamingtestcase import StreamingTestCase
import unittest2
class SimpleStreamingTest(StreamingTestCase):
"""A simple test."""
@classmethod
def tokenize(cls, f):
return f.flatMap(lambda line: line.split(" "))
def test_simple_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
expected = [["hi"], ["hi", "holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.tokenize, expected)
if __name__ == "__main__":
unittest2.main()
Add a second trivial streaming test to make sure our re-using the spark context is ok
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple streaming test"""
from sparktestingbase.streamingtestcase import StreamingTestCase
import unittest2
class SimpleStreamingTest(StreamingTestCase):
"""A simple test."""
@classmethod
def tokenize(cls, f):
return f.flatMap(lambda line: line.split(" "))
@classmethod
def noop(cls, f):
return f.map(lambda x: x)
def test_simple_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
expected = [["hi"], ["hi", "holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.tokenize, expected)
def test_noop_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.noop, input)
if __name__ == "__main__":
unittest2.main()
|
<commit_before>#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple streaming test"""
from sparktestingbase.streamingtestcase import StreamingTestCase
import unittest2
class SimpleStreamingTest(StreamingTestCase):
"""A simple test."""
@classmethod
def tokenize(cls, f):
return f.flatMap(lambda line: line.split(" "))
def test_simple_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
expected = [["hi"], ["hi", "holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.tokenize, expected)
if __name__ == "__main__":
unittest2.main()
<commit_msg>Add a second trivial streaming test to make sure our re-using the spark context is ok<commit_after>
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple streaming test"""
from sparktestingbase.streamingtestcase import StreamingTestCase
import unittest2
class SimpleStreamingTest(StreamingTestCase):
"""A simple test."""
@classmethod
def tokenize(cls, f):
return f.flatMap(lambda line: line.split(" "))
@classmethod
def noop(cls, f):
return f.map(lambda x: x)
def test_simple_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
expected = [["hi"], ["hi", "holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.tokenize, expected)
def test_noop_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.noop, input)
if __name__ == "__main__":
unittest2.main()
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple streaming test"""
from sparktestingbase.streamingtestcase import StreamingTestCase
import unittest2
class SimpleStreamingTest(StreamingTestCase):
"""A simple test."""
@classmethod
def tokenize(cls, f):
return f.flatMap(lambda line: line.split(" "))
def test_simple_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
expected = [["hi"], ["hi", "holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.tokenize, expected)
if __name__ == "__main__":
unittest2.main()
Add a second trivial streaming test to make sure our re-using the spark context is ok#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple streaming test"""
from sparktestingbase.streamingtestcase import StreamingTestCase
import unittest2
class SimpleStreamingTest(StreamingTestCase):
"""A simple test."""
@classmethod
def tokenize(cls, f):
return f.flatMap(lambda line: line.split(" "))
@classmethod
def noop(cls, f):
return f.map(lambda x: x)
def test_simple_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
expected = [["hi"], ["hi", "holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.tokenize, expected)
def test_noop_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.noop, input)
if __name__ == "__main__":
unittest2.main()
|
<commit_before>#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple streaming test"""
from sparktestingbase.streamingtestcase import StreamingTestCase
import unittest2
class SimpleStreamingTest(StreamingTestCase):
"""A simple test."""
@classmethod
def tokenize(cls, f):
return f.flatMap(lambda line: line.split(" "))
def test_simple_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
expected = [["hi"], ["hi", "holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.tokenize, expected)
if __name__ == "__main__":
unittest2.main()
<commit_msg>Add a second trivial streaming test to make sure our re-using the spark context is ok<commit_after>#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple streaming test"""
from sparktestingbase.streamingtestcase import StreamingTestCase
import unittest2
class SimpleStreamingTest(StreamingTestCase):
"""A simple test."""
@classmethod
def tokenize(cls, f):
return f.flatMap(lambda line: line.split(" "))
@classmethod
def noop(cls, f):
return f.map(lambda x: x)
def test_simple_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
expected = [["hi"], ["hi", "holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.tokenize, expected)
def test_noop_transformation(self):
input = [["hi"], ["hi holden"], ["bye"]]
self.run_func(input, SimpleStreamingTest.noop, input)
if __name__ == "__main__":
unittest2.main()
|
f306b2145d5bff7a3d399e14b60274c58c3bf098
|
scripts/tests/test_box_migrate_to_external_account.py
|
scripts/tests/test_box_migrate_to_external_account.py
|
from nose.tools import *
from scripts.box.migrate_to_external_account import do_migration, get_targets
from framework.auth import Auth
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, UserFactory
from website.addons.box.model import BoxUserSettings
from website.addons.box.tests.factories import BoxOAuthSettingsFactory
class TestBoxMigration(OsfTestCase):
# Note: BoxUserSettings.user_settings has to be changed to foreign_user_settings (model and mongo). See migration instructions
def test_migration_no_project(self):
user = UserFactory()
user.add_addon('box')
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
do_migration([user_addon])
user_addon.reload()
assert_is_none(user_addon.oauth_settings)
assert_equal(len(user.external_accounts), 1)
account = user.external_accounts[0]
assert_equal(account.provider, 'box')
assert_equal(account.oauth_key, 'abcdef1')
def test_migration_removes_targets(self):
BoxUserSettings.remove()
user = UserFactory()
project = ProjectFactory(creator=user)
user.add_addon('box', auth=Auth(user))
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
project.add_addon('box', auth=Auth(user))
node_addon = project.get_addon('box')
node_addon.foreign_user_settings = user_addon
node_addon.save()
assert_equal(get_targets().count(), 1)
do_migration([user_addon])
user_addon.reload()
assert_equal(get_targets().count(), 0)
def test_migration_multiple_users(self):
user1 = UserFactory()
user2 = UserFactory()
oauth_settings = BoxOAuthSettingsFactory()
user1.add_addon('box')
user1_addon = user1.get_addon('box')
user1_addon.oauth_settings = oauth_settings
user1_addon.save()
user2.add_addon('box')
user2_addon = user2.get_addon('box')
user2_addon.oauth_settings = oauth_settings
user2_addon.save()
do_migration([user1_addon, user2_addon])
user1_addon.reload()
user2_addon.reload()
assert_equal(
user1.external_accounts[0],
user2.external_accounts[0],
)
def test_get_targets(self):
BoxUserSettings.remove()
addons = [
BoxUserSettings(),
BoxUserSettings(oauth_settings=BoxOAuthSettingsFactory()),
]
for addon in addons:
addon.save()
targets = get_targets()
assert_equal(targets.count(), 1)
assert_equal(targets[0]._id, addons[-1]._id)
|
Add test for box migration script
|
Add test for box migration script
|
Python
|
apache-2.0
|
KAsante95/osf.io,aaxelb/osf.io,Nesiehr/osf.io,emetsger/osf.io,Nesiehr/osf.io,chrisseto/osf.io,Nesiehr/osf.io,saradbowman/osf.io,SSJohns/osf.io,binoculars/osf.io,mluo613/osf.io,leb2dg/osf.io,wearpants/osf.io,HalcyonChimera/osf.io,amyshi188/osf.io,kch8qx/osf.io,acshi/osf.io,saradbowman/osf.io,haoyuchen1992/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,billyhunt/osf.io,mluo613/osf.io,caseyrygt/osf.io,cslzchen/osf.io,aaxelb/osf.io,mattclark/osf.io,GageGaskins/osf.io,leb2dg/osf.io,acshi/osf.io,njantrania/osf.io,GageGaskins/osf.io,samchrisinger/osf.io,Johnetordoff/osf.io,samanehsan/osf.io,baylee-d/osf.io,TomBaxter/osf.io,doublebits/osf.io,RomanZWang/osf.io,Johnetordoff/osf.io,doublebits/osf.io,mluke93/osf.io,kwierman/osf.io,cwisecarver/osf.io,aaxelb/osf.io,cosenal/osf.io,adlius/osf.io,samanehsan/osf.io,CenterForOpenScience/osf.io,kwierman/osf.io,kwierman/osf.io,alexschiller/osf.io,ticklemepierce/osf.io,acshi/osf.io,brandonPurvis/osf.io,brandonPurvis/osf.io,mfraezz/osf.io,haoyuchen1992/osf.io,mluo613/osf.io,cosenal/osf.io,cslzchen/osf.io,kch8qx/osf.io,mluke93/osf.io,alexschiller/osf.io,brianjgeiger/osf.io,njantrania/osf.io,TomHeatwole/osf.io,billyhunt/osf.io,RomanZWang/osf.io,DanielSBrown/osf.io,amyshi188/osf.io,chrisseto/osf.io,laurenrevere/osf.io,caseyrollins/osf.io,brandonPurvis/osf.io,hmoco/osf.io,danielneis/osf.io,felliott/osf.io,laurenrevere/osf.io,njantrania/osf.io,caseyrygt/osf.io,cslzchen/osf.io,asanfilippo7/osf.io,erinspace/osf.io,amyshi188/osf.io,TomBaxter/osf.io,samanehsan/osf.io,asanfilippo7/osf.io,hmoco/osf.io,jnayak1/osf.io,zamattiac/osf.io,baylee-d/osf.io,hmoco/osf.io,pattisdr/osf.io,billyhunt/osf.io,brianjgeiger/osf.io,caseyrygt/osf.io,leb2dg/osf.io,samchrisinger/osf.io,monikagrabowska/osf.io,crcresearch/osf.io,abought/osf.io,samchrisinger/osf.io,binoculars/osf.io,felliott/osf.io,TomHeatwole/osf.io,samchrisinger/osf.io,brandonPurvis/osf.io,emetsger/osf.io,mluke93/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,billyhunt/osf.io,amyshi188/osf.io,RomanZWang/osf.io,doublebits/osf.io,adlius/osf.io,Nesiehr/osf.io,zachjanicki/osf.io,DanielSBrown/osf.io,zamattiac/osf.io,DanielSBrown/osf.io,ticklemepierce/osf.io,Ghalko/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,cwisecarver/osf.io,cosenal/osf.io,KAsante95/osf.io,ticklemepierce/osf.io,haoyuchen1992/osf.io,SSJohns/osf.io,monikagrabowska/osf.io,jnayak1/osf.io,alexschiller/osf.io,njantrania/osf.io,chennan47/osf.io,mluke93/osf.io,Ghalko/osf.io,kch8qx/osf.io,chennan47/osf.io,cosenal/osf.io,abought/osf.io,emetsger/osf.io,GageGaskins/osf.io,chrisseto/osf.io,CenterForOpenScience/osf.io,jnayak1/osf.io,icereval/osf.io,jnayak1/osf.io,danielneis/osf.io,RomanZWang/osf.io,caseyrygt/osf.io,zamattiac/osf.io,Ghalko/osf.io,RomanZWang/osf.io,KAsante95/osf.io,icereval/osf.io,TomHeatwole/osf.io,chrisseto/osf.io,kwierman/osf.io,rdhyee/osf.io,danielneis/osf.io,mattclark/osf.io,wearpants/osf.io,KAsante95/osf.io,billyhunt/osf.io,abought/osf.io,TomBaxter/osf.io,abought/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,SSJohns/osf.io,ZobairAlijan/osf.io,binoculars/osf.io,felliott/osf.io,monikagrabowska/osf.io,rdhyee/osf.io,erinspace/osf.io,asanfilippo7/osf.io,caneruguz/osf.io,zachjanicki/osf.io,TomHeatwole/osf.io,ZobairAlijan/osf.io,felliott/osf.io,monikagrabowska/osf.io,Ghalko/osf.io,caseyrollins/osf.io,zachjanicki/osf.io,brianjgeiger/osf.io,adlius/osf.io,samanehsan/osf.io,SSJohns/osf.io,zamattiac/osf.io,wearpants/osf.io,KAsante95/osf.io,cwisecarver/osf.io,mluo613/osf.io,adlius/osf.io,danielneis/osf.io,icereval/osf.io,ticklemepierce/osf.io,Johnetordoff/osf.io,ale
xschiller/osf.io,caseyrollins/osf.io,GageGaskins/osf.io,mfraezz/osf.io,caneruguz/osf.io,hmoco/osf.io,chennan47/osf.io,baylee-d/osf.io,crcresearch/osf.io,HalcyonChimera/osf.io,zachjanicki/osf.io,cslzchen/osf.io,kch8qx/osf.io,ZobairAlijan/osf.io,asanfilippo7/osf.io,emetsger/osf.io,cwisecarver/osf.io,sloria/osf.io,doublebits/osf.io,rdhyee/osf.io,HalcyonChimera/osf.io,pattisdr/osf.io,wearpants/osf.io,erinspace/osf.io,ZobairAlijan/osf.io,mfraezz/osf.io,brandonPurvis/osf.io,haoyuchen1992/osf.io,alexschiller/osf.io,monikagrabowska/osf.io,doublebits/osf.io,kch8qx/osf.io,rdhyee/osf.io,sloria/osf.io,laurenrevere/osf.io,crcresearch/osf.io,caneruguz/osf.io,DanielSBrown/osf.io,acshi/osf.io,acshi/osf.io,sloria/osf.io,GageGaskins/osf.io,mattclark/osf.io,caneruguz/osf.io,mluo613/osf.io,leb2dg/osf.io
|
Add test for box migration script
|
from nose.tools import *
from scripts.box.migrate_to_external_account import do_migration, get_targets
from framework.auth import Auth
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, UserFactory
from website.addons.box.model import BoxUserSettings
from website.addons.box.tests.factories import BoxOAuthSettingsFactory
class TestBoxMigration(OsfTestCase):
# Note: BoxUserSettings.user_settings has to be changed to foreign_user_settings (model and mongo). See migration instructions
def test_migration_no_project(self):
user = UserFactory()
user.add_addon('box')
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
do_migration([user_addon])
user_addon.reload()
assert_is_none(user_addon.oauth_settings)
assert_equal(len(user.external_accounts), 1)
account = user.external_accounts[0]
assert_equal(account.provider, 'box')
assert_equal(account.oauth_key, 'abcdef1')
def test_migration_removes_targets(self):
BoxUserSettings.remove()
user = UserFactory()
project = ProjectFactory(creator=user)
user.add_addon('box', auth=Auth(user))
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
project.add_addon('box', auth=Auth(user))
node_addon = project.get_addon('box')
node_addon.foreign_user_settings = user_addon
node_addon.save()
assert_equal(get_targets().count(), 1)
do_migration([user_addon])
user_addon.reload()
assert_equal(get_targets().count(), 0)
def test_migration_multiple_users(self):
user1 = UserFactory()
user2 = UserFactory()
oauth_settings = BoxOAuthSettingsFactory()
user1.add_addon('box')
user1_addon = user1.get_addon('box')
user1_addon.oauth_settings = oauth_settings
user1_addon.save()
user2.add_addon('box')
user2_addon = user2.get_addon('box')
user2_addon.oauth_settings = oauth_settings
user2_addon.save()
do_migration([user1_addon, user2_addon])
user1_addon.reload()
user2_addon.reload()
assert_equal(
user1.external_accounts[0],
user2.external_accounts[0],
)
def test_get_targets(self):
BoxUserSettings.remove()
addons = [
BoxUserSettings(),
BoxUserSettings(oauth_settings=BoxOAuthSettingsFactory()),
]
for addon in addons:
addon.save()
targets = get_targets()
assert_equal(targets.count(), 1)
assert_equal(targets[0]._id, addons[-1]._id)
|
<commit_before><commit_msg>Add test for box migration script<commit_after>
|
from nose.tools import *
from scripts.box.migrate_to_external_account import do_migration, get_targets
from framework.auth import Auth
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, UserFactory
from website.addons.box.model import BoxUserSettings
from website.addons.box.tests.factories import BoxOAuthSettingsFactory
class TestBoxMigration(OsfTestCase):
# Note: BoxUserSettings.user_settings has to be changed to foreign_user_settings (model and mongo). See migration instructions
def test_migration_no_project(self):
user = UserFactory()
user.add_addon('box')
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
do_migration([user_addon])
user_addon.reload()
assert_is_none(user_addon.oauth_settings)
assert_equal(len(user.external_accounts), 1)
account = user.external_accounts[0]
assert_equal(account.provider, 'box')
assert_equal(account.oauth_key, 'abcdef1')
def test_migration_removes_targets(self):
BoxUserSettings.remove()
user = UserFactory()
project = ProjectFactory(creator=user)
user.add_addon('box', auth=Auth(user))
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
project.add_addon('box', auth=Auth(user))
node_addon = project.get_addon('box')
node_addon.foreign_user_settings = user_addon
node_addon.save()
assert_equal(get_targets().count(), 1)
do_migration([user_addon])
user_addon.reload()
assert_equal(get_targets().count(), 0)
def test_migration_multiple_users(self):
user1 = UserFactory()
user2 = UserFactory()
oauth_settings = BoxOAuthSettingsFactory()
user1.add_addon('box')
user1_addon = user1.get_addon('box')
user1_addon.oauth_settings = oauth_settings
user1_addon.save()
user2.add_addon('box')
user2_addon = user2.get_addon('box')
user2_addon.oauth_settings = oauth_settings
user2_addon.save()
do_migration([user1_addon, user2_addon])
user1_addon.reload()
user2_addon.reload()
assert_equal(
user1.external_accounts[0],
user2.external_accounts[0],
)
def test_get_targets(self):
BoxUserSettings.remove()
addons = [
BoxUserSettings(),
BoxUserSettings(oauth_settings=BoxOAuthSettingsFactory()),
]
for addon in addons:
addon.save()
targets = get_targets()
assert_equal(targets.count(), 1)
assert_equal(targets[0]._id, addons[-1]._id)
|
Add test for box migration scriptfrom nose.tools import *
from scripts.box.migrate_to_external_account import do_migration, get_targets
from framework.auth import Auth
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, UserFactory
from website.addons.box.model import BoxUserSettings
from website.addons.box.tests.factories import BoxOAuthSettingsFactory
class TestBoxMigration(OsfTestCase):
# Note: BoxUserSettings.user_settings has to be changed to foreign_user_settings (model and mongo). See migration instructions
def test_migration_no_project(self):
user = UserFactory()
user.add_addon('box')
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
do_migration([user_addon])
user_addon.reload()
assert_is_none(user_addon.oauth_settings)
assert_equal(len(user.external_accounts), 1)
account = user.external_accounts[0]
assert_equal(account.provider, 'box')
assert_equal(account.oauth_key, 'abcdef1')
def test_migration_removes_targets(self):
BoxUserSettings.remove()
user = UserFactory()
project = ProjectFactory(creator=user)
user.add_addon('box', auth=Auth(user))
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
project.add_addon('box', auth=Auth(user))
node_addon = project.get_addon('box')
node_addon.foreign_user_settings = user_addon
node_addon.save()
assert_equal(get_targets().count(), 1)
do_migration([user_addon])
user_addon.reload()
assert_equal(get_targets().count(), 0)
def test_migration_multiple_users(self):
user1 = UserFactory()
user2 = UserFactory()
oauth_settings = BoxOAuthSettingsFactory()
user1.add_addon('box')
user1_addon = user1.get_addon('box')
user1_addon.oauth_settings = oauth_settings
user1_addon.save()
user2.add_addon('box')
user2_addon = user2.get_addon('box')
user2_addon.oauth_settings = oauth_settings
user2_addon.save()
do_migration([user1_addon, user2_addon])
user1_addon.reload()
user2_addon.reload()
assert_equal(
user1.external_accounts[0],
user2.external_accounts[0],
)
def test_get_targets(self):
BoxUserSettings.remove()
addons = [
BoxUserSettings(),
BoxUserSettings(oauth_settings=BoxOAuthSettingsFactory()),
]
for addon in addons:
addon.save()
targets = get_targets()
assert_equal(targets.count(), 1)
assert_equal(targets[0]._id, addons[-1]._id)
|
<commit_before><commit_msg>Add test for box migration script<commit_after>from nose.tools import *
from scripts.box.migrate_to_external_account import do_migration, get_targets
from framework.auth import Auth
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, UserFactory
from website.addons.box.model import BoxUserSettings
from website.addons.box.tests.factories import BoxOAuthSettingsFactory
class TestBoxMigration(OsfTestCase):
# Note: BoxUserSettings.user_settings has to be changed to foreign_user_settings (model and mongo). See migration instructions
def test_migration_no_project(self):
user = UserFactory()
user.add_addon('box')
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
do_migration([user_addon])
user_addon.reload()
assert_is_none(user_addon.oauth_settings)
assert_equal(len(user.external_accounts), 1)
account = user.external_accounts[0]
assert_equal(account.provider, 'box')
assert_equal(account.oauth_key, 'abcdef1')
def test_migration_removes_targets(self):
BoxUserSettings.remove()
user = UserFactory()
project = ProjectFactory(creator=user)
user.add_addon('box', auth=Auth(user))
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
project.add_addon('box', auth=Auth(user))
node_addon = project.get_addon('box')
node_addon.foreign_user_settings = user_addon
node_addon.save()
assert_equal(get_targets().count(), 1)
do_migration([user_addon])
user_addon.reload()
assert_equal(get_targets().count(), 0)
def test_migration_multiple_users(self):
user1 = UserFactory()
user2 = UserFactory()
oauth_settings = BoxOAuthSettingsFactory()
user1.add_addon('box')
user1_addon = user1.get_addon('box')
user1_addon.oauth_settings = oauth_settings
user1_addon.save()
user2.add_addon('box')
user2_addon = user2.get_addon('box')
user2_addon.oauth_settings = oauth_settings
user2_addon.save()
do_migration([user1_addon, user2_addon])
user1_addon.reload()
user2_addon.reload()
assert_equal(
user1.external_accounts[0],
user2.external_accounts[0],
)
def test_get_targets(self):
BoxUserSettings.remove()
addons = [
BoxUserSettings(),
BoxUserSettings(oauth_settings=BoxOAuthSettingsFactory()),
]
for addon in addons:
addon.save()
targets = get_targets()
assert_equal(targets.count(), 1)
assert_equal(targets[0]._id, addons[-1]._id)
|
|
5eb7a643de51c972b585410b88b4c5f54bf3362a
|
patterns/creational/facade2.py
|
patterns/creational/facade2.py
|
import abc
class Shape(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def draw(self):
pass
class Rectangle(Shape):
def __init__(self):
super(Rectangle, self).__init__()
def draw(self):
print 'Drawing Rectangle...'
class Square(Shape):
def __init__(self):
super(Square, self).__init__()
def draw(self):
print 'Drawing Square...'
class DemoFacade():
def __init__(self):
self.rectangle = Rectangle()
self.square = Square()
def draw_rectangle(self):
self.rectangle.draw()
def draw_square(self):
self.square.draw()
# main class.
if __name__ == '__main__':
shape_facade = DemoFacade()
shape_facade.draw_rectangle()
shape_facade.draw_square()
|
Create a new example of shape using Facade pattern
|
Create a new example of shape using Facade pattern
|
Python
|
mit
|
rolandovillca/python_basis,rolandovillca/python_introduction_basic,rolandovillca/python_basic_introduction,rolandovillca/python_basic_concepts
|
Create a new example of shape using Facade pattern
|
import abc
class Shape(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def draw(self):
pass
class Rectangle(Shape):
def __init__(self):
super(Rectangle, self).__init__()
def draw(self):
print 'Drawing Rectangle...'
class Square(Shape):
def __init__(self):
super(Square, self).__init__()
def draw(self):
print 'Drawing Square...'
class DemoFacade():
def __init__(self):
self.rectangle = Rectangle()
self.square = Square()
def draw_rectangle(self):
self.rectangle.draw()
def draw_square(self):
self.square.draw()
# main class.
if __name__ == '__main__':
shape_facade = DemoFacade()
shape_facade.draw_rectangle()
shape_facade.draw_square()
|
<commit_before><commit_msg>Create a new example of shape using Facade pattern<commit_after>
|
import abc
class Shape(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def draw(self):
pass
class Rectangle(Shape):
def __init__(self):
super(Rectangle, self).__init__()
def draw(self):
print 'Drawing Rectangle...'
class Square(Shape):
def __init__(self):
super(Square, self).__init__()
def draw(self):
print 'Drawing Square...'
class DemoFacade():
def __init__(self):
self.rectangle = Rectangle()
self.square = Square()
def draw_rectangle(self):
self.rectangle.draw()
def draw_square(self):
self.square.draw()
# main class.
if __name__ == '__main__':
shape_facade = DemoFacade()
shape_facade.draw_rectangle()
shape_facade.draw_square()
|
Create a new example of shape using Facade patternimport abc
class Shape(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def draw(self):
pass
class Rectangle(Shape):
def __init__(self):
super(Rectangle, self).__init__()
def draw(self):
print 'Drawing Rectangle...'
class Square(Shape):
def __init__(self):
super(Square, self).__init__()
def draw(self):
print 'Drawing Square...'
class DemoFacade():
def __init__(self):
self.rectangle = Rectangle()
self.square = Square()
def draw_rectangle(self):
self.rectangle.draw()
def draw_square(self):
self.square.draw()
# main class.
if __name__ == '__main__':
shape_facade = DemoFacade()
shape_facade.draw_rectangle()
shape_facade.draw_square()
|
<commit_before><commit_msg>Create a new example of shape using Facade pattern<commit_after>import abc
class Shape(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def draw(self):
pass
class Rectangle(Shape):
def __init__(self):
super(Rectangle, self).__init__()
def draw(self):
print 'Drawing Rectangle...'
class Square(Shape):
def __init__(self):
super(Square, self).__init__()
def draw(self):
print 'Drawing Square...'
class DemoFacade():
def __init__(self):
self.rectangle = Rectangle()
self.square = Square()
def draw_rectangle(self):
self.rectangle.draw()
def draw_square(self):
self.square.draw()
# main class.
if __name__ == '__main__':
shape_facade = DemoFacade()
shape_facade.draw_rectangle()
shape_facade.draw_square()
|
|
681871b7b7271d8431e8d92a29d8ab02e9d9ba0d
|
contrib/linux/tests/test_action_dig.py
|
contrib/linux/tests/test_action_dig.py
|
#!/usr/bin/env python
# Copyright 2020 The StackStorm Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
|
Add initial tests for linux.dig action
|
Add initial tests for linux.dig action
|
Python
|
apache-2.0
|
StackStorm/st2,Plexxi/st2,nzlosh/st2,StackStorm/st2,nzlosh/st2,nzlosh/st2,Plexxi/st2,Plexxi/st2,StackStorm/st2,nzlosh/st2,StackStorm/st2,Plexxi/st2
|
Add initial tests for linux.dig action
|
#!/usr/bin/env python
# Copyright 2020 The StackStorm Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
|
<commit_before><commit_msg>Add initial tests for linux.dig action<commit_after>
|
#!/usr/bin/env python
# Copyright 2020 The StackStorm Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
|
Add initial tests for linux.dig action#!/usr/bin/env python
# Copyright 2020 The StackStorm Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
|
<commit_before><commit_msg>Add initial tests for linux.dig action<commit_after>#!/usr/bin/env python
# Copyright 2020 The StackStorm Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
|
|
0224af7414bdfff6b4bd3118ba50cfc08ed60215
|
py/expression-add-operators.py
|
py/expression-add-operators.py
|
from collections import defaultdict
class Solution(object):
def dfs_ans(self, ans, depth, lans, ans_list):
if depth == lans:
yield ''.join(ans_list)
else:
if isinstance(ans[depth], set):
for x in ans[depth]:
ans_list.append(x)
for y in self.dfs_ans(ans, depth + 1, lans, ans_list):
yield y
ans_list.pop()
else:
ans_list.append(ans[depth])
for y in self.dfs_ans(ans, depth + 1, lans, ans_list):
yield y
ans_list.pop()
def dfs(self, dp, end, target, depth, cur, ans):
if end == depth and cur == target:
for x in self.dfs_ans(ans, 0, len(ans), []):
yield x
else:
for nd in xrange(depth + 1, end + 1):
for possible_v, possible_set in dp[depth, nd].iteritems():
if depth > 0:
ans.append('+')
ans.append(possible_set)
for x in self.dfs(dp, end, target, nd, cur + possible_v, ans):
yield x
ans.pop()
if depth > 0:
ans.pop()
for x in self.dfs(dp, end, target, nd, cur - possible_v, ans + ['-', possible_set]):
yield x
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
dp = defaultdict(lambda:defaultdict(set))
# merge phase
lnum = len(num)
for i in xrange(1, lnum + 1):
for j in xrange(i):
if num[j] != '0' or j + 1 == i:
dp[j, i][int(num[j:i])].add(num[j:i])
# multiple phase
for l in xrange(2, lnum + 1):
for i in xrange(lnum - l + 1):
for j in xrange(i + 1, i + l):
for a, a_set in dp[i, j].iteritems():
for b, b_set in dp[j, i + l].iteritems():
for a_ in a_set:
if '*' not in a_:
for b_ in b_set:
dp[i, i + l][a * b].add(a_ + '*' + b_)
return list(self.dfs(dp, lnum, target, 0, 0, []))
|
Add py solution for 282. Expression Add Operators
|
Add py solution for 282. Expression Add Operators
282. Expression Add Operators: https://leetcode.com/problems/expression-add-operators/
Way too ugly...
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 282. Expression Add Operators
282. Expression Add Operators: https://leetcode.com/problems/expression-add-operators/
Way too ugly...
|
from collections import defaultdict
class Solution(object):
def dfs_ans(self, ans, depth, lans, ans_list):
if depth == lans:
yield ''.join(ans_list)
else:
if isinstance(ans[depth], set):
for x in ans[depth]:
ans_list.append(x)
for y in self.dfs_ans(ans, depth + 1, lans, ans_list):
yield y
ans_list.pop()
else:
ans_list.append(ans[depth])
for y in self.dfs_ans(ans, depth + 1, lans, ans_list):
yield y
ans_list.pop()
def dfs(self, dp, end, target, depth, cur, ans):
if end == depth and cur == target:
for x in self.dfs_ans(ans, 0, len(ans), []):
yield x
else:
for nd in xrange(depth + 1, end + 1):
for possible_v, possible_set in dp[depth, nd].iteritems():
if depth > 0:
ans.append('+')
ans.append(possible_set)
for x in self.dfs(dp, end, target, nd, cur + possible_v, ans):
yield x
ans.pop()
if depth > 0:
ans.pop()
for x in self.dfs(dp, end, target, nd, cur - possible_v, ans + ['-', possible_set]):
yield x
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
dp = defaultdict(lambda:defaultdict(set))
# merge phase
lnum = len(num)
for i in xrange(1, lnum + 1):
for j in xrange(i):
if num[j] != '0' or j + 1 == i:
dp[j, i][int(num[j:i])].add(num[j:i])
# multiple phase
for l in xrange(2, lnum + 1):
for i in xrange(lnum - l + 1):
for j in xrange(i + 1, i + l):
for a, a_set in dp[i, j].iteritems():
for b, b_set in dp[j, i + l].iteritems():
for a_ in a_set:
if '*' not in a_:
for b_ in b_set:
dp[i, i + l][a * b].add(a_ + '*' + b_)
return list(self.dfs(dp, lnum, target, 0, 0, []))
|
<commit_before><commit_msg>Add py solution for 282. Expression Add Operators
282. Expression Add Operators: https://leetcode.com/problems/expression-add-operators/
Way too ugly...<commit_after>
|
from collections import defaultdict
class Solution(object):
def dfs_ans(self, ans, depth, lans, ans_list):
if depth == lans:
yield ''.join(ans_list)
else:
if isinstance(ans[depth], set):
for x in ans[depth]:
ans_list.append(x)
for y in self.dfs_ans(ans, depth + 1, lans, ans_list):
yield y
ans_list.pop()
else:
ans_list.append(ans[depth])
for y in self.dfs_ans(ans, depth + 1, lans, ans_list):
yield y
ans_list.pop()
def dfs(self, dp, end, target, depth, cur, ans):
if end == depth and cur == target:
for x in self.dfs_ans(ans, 0, len(ans), []):
yield x
else:
for nd in xrange(depth + 1, end + 1):
for possible_v, possible_set in dp[depth, nd].iteritems():
if depth > 0:
ans.append('+')
ans.append(possible_set)
for x in self.dfs(dp, end, target, nd, cur + possible_v, ans):
yield x
ans.pop()
if depth > 0:
ans.pop()
for x in self.dfs(dp, end, target, nd, cur - possible_v, ans + ['-', possible_set]):
yield x
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
dp = defaultdict(lambda:defaultdict(set))
# merge phase
lnum = len(num)
for i in xrange(1, lnum + 1):
for j in xrange(i):
if num[j] != '0' or j + 1 == i:
dp[j, i][int(num[j:i])].add(num[j:i])
# multiple phase
for l in xrange(2, lnum + 1):
for i in xrange(lnum - l + 1):
for j in xrange(i + 1, i + l):
for a, a_set in dp[i, j].iteritems():
for b, b_set in dp[j, i + l].iteritems():
for a_ in a_set:
if '*' not in a_:
for b_ in b_set:
dp[i, i + l][a * b].add(a_ + '*' + b_)
return list(self.dfs(dp, lnum, target, 0, 0, []))
|
Add py solution for 282. Expression Add Operators
282. Expression Add Operators: https://leetcode.com/problems/expression-add-operators/
Way too ugly...from collections import defaultdict
class Solution(object):
def dfs_ans(self, ans, depth, lans, ans_list):
if depth == lans:
yield ''.join(ans_list)
else:
if isinstance(ans[depth], set):
for x in ans[depth]:
ans_list.append(x)
for y in self.dfs_ans(ans, depth + 1, lans, ans_list):
yield y
ans_list.pop()
else:
ans_list.append(ans[depth])
for y in self.dfs_ans(ans, depth + 1, lans, ans_list):
yield y
ans_list.pop()
def dfs(self, dp, end, target, depth, cur, ans):
if end == depth and cur == target:
for x in self.dfs_ans(ans, 0, len(ans), []):
yield x
else:
for nd in xrange(depth + 1, end + 1):
for possible_v, possible_set in dp[depth, nd].iteritems():
if depth > 0:
ans.append('+')
ans.append(possible_set)
for x in self.dfs(dp, end, target, nd, cur + possible_v, ans):
yield x
ans.pop()
if depth > 0:
ans.pop()
for x in self.dfs(dp, end, target, nd, cur - possible_v, ans + ['-', possible_set]):
yield x
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
dp = defaultdict(lambda:defaultdict(set))
# merge phase
lnum = len(num)
for i in xrange(1, lnum + 1):
for j in xrange(i):
if num[j] != '0' or j + 1 == i:
dp[j, i][int(num[j:i])].add(num[j:i])
# multiple phase
for l in xrange(2, lnum + 1):
for i in xrange(lnum - l + 1):
for j in xrange(i + 1, i + l):
for a, a_set in dp[i, j].iteritems():
for b, b_set in dp[j, i + l].iteritems():
for a_ in a_set:
if '*' not in a_:
for b_ in b_set:
dp[i, i + l][a * b].add(a_ + '*' + b_)
return list(self.dfs(dp, lnum, target, 0, 0, []))
|
<commit_before><commit_msg>Add py solution for 282. Expression Add Operators
282. Expression Add Operators: https://leetcode.com/problems/expression-add-operators/
Way too ugly...<commit_after>from collections import defaultdict
class Solution(object):
def dfs_ans(self, ans, depth, lans, ans_list):
if depth == lans:
yield ''.join(ans_list)
else:
if isinstance(ans[depth], set):
for x in ans[depth]:
ans_list.append(x)
for y in self.dfs_ans(ans, depth + 1, lans, ans_list):
yield y
ans_list.pop()
else:
ans_list.append(ans[depth])
for y in self.dfs_ans(ans, depth + 1, lans, ans_list):
yield y
ans_list.pop()
def dfs(self, dp, end, target, depth, cur, ans):
if end == depth and cur == target:
for x in self.dfs_ans(ans, 0, len(ans), []):
yield x
else:
for nd in xrange(depth + 1, end + 1):
for possible_v, possible_set in dp[depth, nd].iteritems():
if depth > 0:
ans.append('+')
ans.append(possible_set)
for x in self.dfs(dp, end, target, nd, cur + possible_v, ans):
yield x
ans.pop()
if depth > 0:
ans.pop()
for x in self.dfs(dp, end, target, nd, cur - possible_v, ans + ['-', possible_set]):
yield x
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
dp = defaultdict(lambda:defaultdict(set))
# merge phase
lnum = len(num)
for i in xrange(1, lnum + 1):
for j in xrange(i):
if num[j] != '0' or j + 1 == i:
dp[j, i][int(num[j:i])].add(num[j:i])
# multiple phase
for l in xrange(2, lnum + 1):
for i in xrange(lnum - l + 1):
for j in xrange(i + 1, i + l):
for a, a_set in dp[i, j].iteritems():
for b, b_set in dp[j, i + l].iteritems():
for a_ in a_set:
if '*' not in a_:
for b_ in b_set:
dp[i, i + l][a * b].add(a_ + '*' + b_)
return list(self.dfs(dp, lnum, target, 0, 0, []))
|
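A hedged usage sketch for the solution above. The expected strings follow the LeetCode problem statement (insert '+', '-' or '*' between the digits of num so that the expression evaluates to target); result order is unspecified, so the lists are sorted here for readability. Whether this exact implementation was checked against these cases is not recorded in the commit.

# Usage sketch (assumes the Solution class defined above is in scope):
sol = Solution()
print(sorted(sol.addOperators("123", 6)))    # ['1*2*3', '1+2+3']
print(sorted(sol.addOperators("105", 5)))    # ['1*0+5', '10-5']
print(sol.addOperators("3456237490", 9191))  # [] (no valid expression)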
|
192871bcf6fe0881a0b0aface4306cb6ec93710e
|
test/benchmarks/stepping/TestRunHooksThenSteppings.py
|
test/benchmarks/stepping/TestRunHooksThenSteppings.py
|
"""Test lldb's stepping speed."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class RunHooksThenSteppingsBench(BenchBase):
mydir = os.path.join("benchmarks", "stepping")
def setUp(self):
BenchBase.setUp(self)
self.stepping_avg = None
@benchmarks_test
def test_lldb_runhooks_then_steppings(self):
"""Test lldb steppings on a large executable."""
print
self.run_lldb_runhooks_then_steppings(50)
print "lldb stepping benchmark:", self.stopwatch
def run_lldb_runhooks_then_steppings(self, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.lldbOption))
self.child.expect_exact(prompt)
# So that the child gets torn down after the test.
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
#lldb.runHooks = ['process attach -n Mail']
# Perform the run hooks to bring lldb debugger to the desired state.
if not lldb.runHooks:
self.skipTest("No runhooks specified for lldb, skip the test")
for hook in lldb.runHooks:
child.sendline(hook)
child.expect_exact(prompt)
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
# Disassemble the function.
child.sendline('next') # Aka 'thread step-over'.
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.stepping_avg = self.stopwatch.avg()
if self.TraceOn():
print "lldb stepping benchmark:", str(self.stopwatch)
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a more generic stepping benchmark, which uses the '-k' option of the test driver to be able to specify the runhook(s) to bring the debug session to a certain state before running the benchmarking logic. An example,
|
Add a more generic stepping benchmark, which uses the '-k' option of the test driver
to be able to specify the runhook(s) to bring the debug session to a certain state
before running the benchmarking logic. An example,
./dotest.py -v -t +b -k 'process attach -n Mail' -k 'thread backtrace all' -p TestRunHooksThenSteppings.py
spawns lldb, attaches to the 'Mail' application, does a backtrace for all threads, and then
runs the benchmark to step the inferior multiple times.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@141740 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb
|
Add a more generic stepping benchmark, which uses the '-k' option of the test driver
to be able to specify the runhook(s) to bring the debug session to a certain state
before running the benchmarking logic. An example,
./dotest.py -v -t +b -k 'process attach -n Mail' -k 'thread backtrace all' -p TestRunHooksThenSteppings.py
spawns lldb, attaches to the 'Mail' application, does a backtrace for all threads, and then
runs the benchmark to step the inferior multiple times.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@141740 91177308-0d34-0410-b5e6-96231b3b80d8
|
"""Test lldb's stepping speed."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class RunHooksThenSteppingsBench(BenchBase):
mydir = os.path.join("benchmarks", "stepping")
def setUp(self):
BenchBase.setUp(self)
self.stepping_avg = None
@benchmarks_test
def test_lldb_runhooks_then_steppings(self):
"""Test lldb steppings on a large executable."""
print
self.run_lldb_runhooks_then_steppings(50)
print "lldb stepping benchmark:", self.stopwatch
def run_lldb_runhooks_then_steppings(self, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.lldbOption))
self.child.expect_exact(prompt)
# So that the child gets torn down after the test.
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
#lldb.runHooks = ['process attach -n Mail']
# Perform the run hooks to bring lldb debugger to the desired state.
if not lldb.runHooks:
self.skipTest("No runhooks specified for lldb, skip the test")
for hook in lldb.runHooks:
child.sendline(hook)
child.expect_exact(prompt)
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
# Disassemble the function.
child.sendline('next') # Aka 'thread step-over'.
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.stepping_avg = self.stopwatch.avg()
if self.TraceOn():
print "lldb stepping benchmark:", str(self.stopwatch)
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a more generic stepping benchmark, which uses the '-k' option of the test driver
to be able to specify the runhook(s) to bring the debug session to a certain state
before running the benchmarking logic. An example,
./dotest.py -v -t +b -k 'process attach -n Mail' -k 'thread backtrace all' -p TestRunHooksThenSteppings.py
spawns lldb, attaches to the 'Mail' application, does a backtrace for all threads, and then
runs the benchmark to step the inferior multiple times.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@141740 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
"""Test lldb's stepping speed."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class RunHooksThenSteppingsBench(BenchBase):
mydir = os.path.join("benchmarks", "stepping")
def setUp(self):
BenchBase.setUp(self)
self.stepping_avg = None
@benchmarks_test
def test_lldb_runhooks_then_steppings(self):
"""Test lldb steppings on a large executable."""
print
self.run_lldb_runhooks_then_steppings(50)
print "lldb stepping benchmark:", self.stopwatch
def run_lldb_runhooks_then_steppings(self, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.lldbOption))
self.child.expect_exact(prompt)
# So that the child gets torn down after the test.
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
#lldb.runHooks = ['process attach -n Mail']
# Perform the run hooks to bring lldb debugger to the desired state.
if not lldb.runHooks:
self.skipTest("No runhooks specified for lldb, skip the test")
for hook in lldb.runHooks:
child.sendline(hook)
child.expect_exact(prompt)
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
# Disassemble the function.
child.sendline('next') # Aka 'thread step-over'.
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.stepping_avg = self.stopwatch.avg()
if self.TraceOn():
print "lldb stepping benchmark:", str(self.stopwatch)
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a more generic stepping benchmark, which uses the '-k' option of the test driver
to be able to specify the runhook(s) to bring the debug session to a certain state
before running the benchmarking logic. An example,
./dotest.py -v -t +b -k 'process attach -n Mail' -k 'thread backtrace all' -p TestRunHooksThenSteppings.py
spawns lldb, attaches to the 'Mail' application, does a backtrace for all threads, and then
runs the benchmark to step the inferior multiple times.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@141740 91177308-0d34-0410-b5e6-96231b3b80d8"""Test lldb's stepping speed."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class RunHooksThenSteppingsBench(BenchBase):
mydir = os.path.join("benchmarks", "stepping")
def setUp(self):
BenchBase.setUp(self)
self.stepping_avg = None
@benchmarks_test
def test_lldb_runhooks_then_steppings(self):
"""Test lldb steppings on a large executable."""
print
self.run_lldb_runhooks_then_steppings(50)
print "lldb stepping benchmark:", self.stopwatch
def run_lldb_runhooks_then_steppings(self, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.lldbOption))
self.child.expect_exact(prompt)
# So that the child gets torn down after the test.
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
#lldb.runHooks = ['process attach -n Mail']
# Perform the run hooks to bring lldb debugger to the desired state.
if not lldb.runHooks:
self.skipTest("No runhooks specified for lldb, skip the test")
for hook in lldb.runHooks:
child.sendline(hook)
child.expect_exact(prompt)
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
# Disassemble the function.
child.sendline('next') # Aka 'thread step-over'.
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.stepping_avg = self.stopwatch.avg()
if self.TraceOn():
print "lldb stepping benchmark:", str(self.stopwatch)
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a more generic stepping benchmark, which uses the '-k' option of the test driver
to be able to specify the runhook(s) to bring the debug session to a certain state
before running the benchmarking logic. An example,
./dotest.py -v -t +b -k 'process attach -n Mail' -k 'thread backtrace all' -p TestRunHooksThenSteppings.py
spawns lldb, attaches to the 'Mail' application, does a backtrace for all threads, and then
runs the benchmark to step the inferior multiple times.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@141740 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>"""Test lldb's stepping speed."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class RunHooksThenSteppingsBench(BenchBase):
mydir = os.path.join("benchmarks", "stepping")
def setUp(self):
BenchBase.setUp(self)
self.stepping_avg = None
@benchmarks_test
def test_lldb_runhooks_then_steppings(self):
"""Test lldb steppings on a large executable."""
print
self.run_lldb_runhooks_then_steppings(50)
print "lldb stepping benchmark:", self.stopwatch
def run_lldb_runhooks_then_steppings(self, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.lldbOption))
self.child.expect_exact(prompt)
# So that the child gets torn down after the test.
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
#lldb.runHooks = ['process attach -n Mail']
# Perform the run hooks to bring lldb debugger to the desired state.
if not lldb.runHooks:
self.skipTest("No runhooks specified for lldb, skip the test")
for hook in lldb.runHooks:
child.sendline(hook)
child.expect_exact(prompt)
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
# Disassemble the function.
child.sendline('next') # Aka 'thread step-over'.
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.stepping_avg = self.stopwatch.avg()
if self.TraceOn():
print "lldb stepping benchmark:", str(self.stopwatch)
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
|
dd0ee85ef6e36d3e384ac5d20924acb4fd5f3108
|
tests/commands/logs_test.py
|
tests/commands/logs_test.py
|
from mock import patch
from ..utils import DustyTestCase
from dusty.commands.logs import tail_container_logs
class TestLogsCommands(DustyTestCase):
@patch('dusty.commands.logs.exec_docker')
@patch('dusty.commands.logs.get_dusty_containers')
def test_tail_container_logs(self, fake_get_containers, fake_exec_docker):
fake_get_containers.return_value = [{'Id': 'container-id'}]
tail_container_logs('app-a')
fake_get_containers.assert_called_once_with(['app-a'])
fake_exec_docker.assert_called_once_with('logs', '-f', 'container-id')
|
Add tests for the log command
|
Add tests for the log command
|
Python
|
mit
|
gamechanger/dusty,gamechanger/dusty,gamechanger/dusty,gamechanger/dusty,gamechanger/dusty
|
Add tests for the log command
|
from mock import patch
from ..utils import DustyTestCase
from dusty.commands.logs import tail_container_logs
class TestLogsCommands(DustyTestCase):
@patch('dusty.commands.logs.exec_docker')
@patch('dusty.commands.logs.get_dusty_containers')
def test_tail_container_logs(self, fake_get_containers, fake_exec_docker):
fake_get_containers.return_value = [{'Id': 'container-id'}]
tail_container_logs('app-a')
fake_get_containers.assert_called_once_with(['app-a'])
fake_exec_docker.assert_called_once_with('logs', '-f', 'container-id')
|
<commit_before><commit_msg>Add tests for the log command<commit_after>
|
from mock import patch
from ..utils import DustyTestCase
from dusty.commands.logs import tail_container_logs
class TestLogsCommands(DustyTestCase):
@patch('dusty.commands.logs.exec_docker')
@patch('dusty.commands.logs.get_dusty_containers')
def test_tail_container_logs(self, fake_get_containers, fake_exec_docker):
fake_get_containers.return_value = [{'Id': 'container-id'}]
tail_container_logs('app-a')
fake_get_containers.assert_called_once_with(['app-a'])
fake_exec_docker.assert_called_once_with('logs', '-f', 'container-id')
|
Add tests for the log commandfrom mock import patch
from ..utils import DustyTestCase
from dusty.commands.logs import tail_container_logs
class TestLogsCommands(DustyTestCase):
@patch('dusty.commands.logs.exec_docker')
@patch('dusty.commands.logs.get_dusty_containers')
def test_tail_container_logs(self, fake_get_containers, fake_exec_docker):
fake_get_containers.return_value = [{'Id': 'container-id'}]
tail_container_logs('app-a')
fake_get_containers.assert_called_once_with(['app-a'])
fake_exec_docker.assert_called_once_with('logs', '-f', 'container-id')
|
<commit_before><commit_msg>Add tests for the log command<commit_after>from mock import patch
from ..utils import DustyTestCase
from dusty.commands.logs import tail_container_logs
class TestLogsCommands(DustyTestCase):
@patch('dusty.commands.logs.exec_docker')
@patch('dusty.commands.logs.get_dusty_containers')
def test_tail_container_logs(self, fake_get_containers, fake_exec_docker):
fake_get_containers.return_value = [{'Id': 'container-id'}]
tail_container_logs('app-a')
fake_get_containers.assert_called_once_with(['app-a'])
fake_exec_docker.assert_called_once_with('logs', '-f', 'container-id')
|
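The mocked test above pins down a contract for tail_container_logs without showing the function itself. A minimal sketch consistent with those assertions is given below; it is an illustration, not dusty's actual module, and the import path for the two helpers is a placeholder (the test only shows that they are module-level names in dusty.commands.logs).

# Hypothetical sketch of dusty/commands/logs.py, inferred from the assertions above.
from dusty.systems.docker import get_dusty_containers, exec_docker  # placeholder import path

def tail_container_logs(app_or_service_name):
    """Run `docker logs -f` for each container backing the given app or service."""
    for container in get_dusty_containers([app_or_service_name]):
        exec_docker('logs', '-f', container['Id'])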
|
0c39e2f5774b78ca5025e8ffe0fbde4ab2e86abf
|
tests/test_summary_class.py
|
tests/test_summary_class.py
|
# coding: utf8
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Test text-based summary reporter for coverage.py"""
import collections
import unittest
import os.path
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from coverage import summary, data, control, config
LINES_1 = {
__file__: {-1: 1, 7: 1},
os.path.join(os.path.dirname(__file__), 'helpers.py'): {-1: 1, 7: 1},
}
class TestSummaryReporterConfiguration(unittest.TestCase):
def get_coverage_data(self, lines=LINES_1):
"""Get a CoverageData object that includes the requested lines."""
data1 = data.CoverageData()
data1.add_lines(lines)
return data1
def get_summary_text(self, coverage_data, options):
"""Get text output from the SummaryReporter."""
cov = control.Coverage()
cov.data = coverage_data
printer = summary.SummaryReporter(cov, options)
destination = StringIO()
printer.report([], destination)
return destination.getvalue()
def test_defaults(self):
"""Run the report with no configuration options."""
data = self.get_coverage_data()
opts = config.CoverageConfig()
report = self.get_summary_text(data, opts)
self.assertNotIn('Missing', report)
self.assertNotIn('Branch', report)
def test_print_missing(self):
"""Run the report printing the missing lines."""
data = self.get_coverage_data()
opts = config.CoverageConfig()
opts.from_args(show_missing=True)
report = self.get_summary_text(data, opts)
self.assertIn('Missing', report)
self.assertNotIn('Branch', report)
|
Add unit-level test for the SummaryReporter Tests configuration of the report method of SummaryReporter

|
Add unit-level test for the SummaryReporter
Tests configuration of the report method of SummaryReporter
|
Python
|
apache-2.0
|
hugovk/coveragepy,hugovk/coveragepy,blueyed/coveragepy,blueyed/coveragepy,blueyed/coveragepy,nedbat/coveragepy,hugovk/coveragepy,nedbat/coveragepy,nedbat/coveragepy,blueyed/coveragepy,nedbat/coveragepy,blueyed/coveragepy,nedbat/coveragepy,hugovk/coveragepy,hugovk/coveragepy
|
Add unit-level test for the SummaryReporter
Tests configuration of the report method of SummaryReader
|
# coding: utf8
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Test text-based summary reporter for coverage.py"""
import collections
import unittest
import os.path
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from coverage import summary, data, control, config
LINES_1 = {
__file__: {-1: 1, 7: 1},
os.path.join(os.path.dirname(__file__), 'helpers.py'): {-1: 1, 7: 1},
}
class TestSummaryReporterConfiguration(unittest.TestCase):
def get_coverage_data(self, lines=LINES_1):
"""Get a CoverageData object that includes the requested lines."""
data1 = data.CoverageData()
data1.add_lines(lines)
return data1
def get_summary_text(self, coverage_data, options):
"""Get text output from the SummaryReporter."""
cov = control.Coverage()
cov.data = coverage_data
printer = summary.SummaryReporter(cov, options)
destination = StringIO()
printer.report([], destination)
return destination.getvalue()
def test_defaults(self):
"""Run the report with no configuration options."""
data = self.get_coverage_data()
opts = config.CoverageConfig()
report = self.get_summary_text(data, opts)
self.assertNotIn('Missing', report)
self.assertNotIn('Branch', report)
def test_print_missing(self):
"""Run the report printing the missing lines."""
data = self.get_coverage_data()
opts = config.CoverageConfig()
opts.from_args(show_missing=True)
report = self.get_summary_text(data, opts)
self.assertIn('Missing', report)
self.assertNotIn('Branch', report)
|
<commit_before><commit_msg>Add unit-level test for the SummaryReporter
Tests configuration of the report method of SummaryReporter<commit_after>
|
# coding: utf8
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Test text-based summary reporter for coverage.py"""
import collections
import unittest
import os.path
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from coverage import summary, data, control, config
LINES_1 = {
__file__: {-1: 1, 7: 1},
os.path.join(os.path.dirname(__file__), 'helpers.py'): {-1: 1, 7: 1},
}
class TestSummaryReporterConfiguration(unittest.TestCase):
def get_coverage_data(self, lines=LINES_1):
"""Get a CoverageData object that includes the requested lines."""
data1 = data.CoverageData()
data1.add_lines(lines)
return data1
def get_summary_text(self, coverage_data, options):
"""Get text output from the SummaryReporter."""
cov = control.Coverage()
cov.data = coverage_data
printer = summary.SummaryReporter(cov, options)
destination = StringIO()
printer.report([], destination)
return destination.getvalue()
def test_defaults(self):
"""Run the report with no configuration options."""
data = self.get_coverage_data()
opts = config.CoverageConfig()
report = self.get_summary_text(data, opts)
self.assertNotIn('Missing', report)
self.assertNotIn('Branch', report)
def test_print_missing(self):
"""Run the report printing the missing lines."""
data = self.get_coverage_data()
opts = config.CoverageConfig()
opts.from_args(show_missing=True)
report = self.get_summary_text(data, opts)
self.assertIn('Missing', report)
self.assertNotIn('Branch', report)
|
Add unit-level test for the SummaryReporter
Tests configuration of the report method of SummaryReporter# coding: utf8
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Test text-based summary reporter for coverage.py"""
import collections
import unittest
import os.path
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from coverage import summary, data, control, config
LINES_1 = {
__file__: {-1: 1, 7: 1},
os.path.join(os.path.dirname(__file__), 'helpers.py'): {-1: 1, 7: 1},
}
class TestSummaryReporterConfiguration(unittest.TestCase):
def get_coverage_data(self, lines=LINES_1):
"""Get a CoverageData object that includes the requested lines."""
data1 = data.CoverageData()
data1.add_lines(lines)
return data1
def get_summary_text(self, coverage_data, options):
"""Get text output from the SummaryReporter."""
cov = control.Coverage()
cov.data = coverage_data
printer = summary.SummaryReporter(cov, options)
destination = StringIO()
printer.report([], destination)
return destination.getvalue()
def test_defaults(self):
"""Run the report with no configuration options."""
data = self.get_coverage_data()
opts = config.CoverageConfig()
report = self.get_summary_text(data, opts)
self.assertNotIn('Missing', report)
self.assertNotIn('Branch', report)
def test_print_missing(self):
"""Run the report printing the missing lines."""
data = self.get_coverage_data()
opts = config.CoverageConfig()
opts.from_args(show_missing=True)
report = self.get_summary_text(data, opts)
self.assertIn('Missing', report)
self.assertNotIn('Branch', report)
|
<commit_before><commit_msg>Add unit-level test for the SummaryReporter
Tests configuration of the report method of SummaryReporter<commit_after># coding: utf8
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Test text-based summary reporter for coverage.py"""
import collections
import unittest
import os.path
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from coverage import summary, data, control, config
LINES_1 = {
__file__: {-1: 1, 7: 1},
os.path.join(os.path.dirname(__file__), 'helpers.py'): {-1: 1, 7: 1},
}
class TestSummaryReporterConfiguration(unittest.TestCase):
def get_coverage_data(self, lines=LINES_1):
"""Get a CoverageData object that includes the requested lines."""
data1 = data.CoverageData()
data1.add_lines(lines)
return data1
def get_summary_text(self, coverage_data, options):
"""Get text output from the SummaryReporter."""
cov = control.Coverage()
cov.data = coverage_data
printer = summary.SummaryReporter(cov, options)
destination = StringIO()
printer.report([], destination)
return destination.getvalue()
def test_defaults(self):
"""Run the report with no configuration options."""
data = self.get_coverage_data()
opts = config.CoverageConfig()
report = self.get_summary_text(data, opts)
self.assertNotIn('Missing', report)
self.assertNotIn('Branch', report)
def test_print_missing(self):
"""Run the report printing the missing lines."""
data = self.get_coverage_data()
opts = config.CoverageConfig()
opts.from_args(show_missing=True)
report = self.get_summary_text(data, opts)
self.assertIn('Missing', report)
self.assertNotIn('Branch', report)
|
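The test flips show_missing programmatically through CoverageConfig.from_args. For context, the same column is normally enabled on the command line or in the config file; the snippet below is a reminder of those equivalents, not part of the commit.

# Equivalents outside the unit test:
#   command line:  coverage report -m          (alias for --show-missing)
#   .coveragerc:   [report]
#                  show_missing = True
# Programmatically, mirroring the test:
from coverage import config

opts = config.CoverageConfig()
opts.from_args(show_missing=True)
assert opts.show_missing is True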
|
f132b8f2697ec2dc27529f9f633830566d73d663
|
tests/test_reset.py
|
tests/test_reset.py
|
#!/usr/bin/python
import sys
import pycurl
saw_error = 1
def main():
global saw_error
pycurl.global_init(pycurl.GLOBAL_DEFAULT)
outf = file("/dev/null", "rb+")
cm = pycurl.CurlMulti()
# Set multi handle's options
cm.setopt(pycurl.M_PIPELINING, 1)
eh = pycurl.Curl()
for x in range(1, 20):
eh.setopt(pycurl.WRITEDATA, outf)
eh.setopt(pycurl.URL, sys.argv[1])
cm.add_handle(eh)
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while active_handles:
ret = cm.select(1.0)
if ret == -1:
continue
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
count, good, bad = cm.info_read()
for h, en, em in bad:
print "Transfer to %s failed with %d, %s\n" % \
(h.getinfo(pycurl.EFFECTIVE_URL), en, em)
raise RuntimeError
for h in good:
httpcode = h.getinfo(pycurl.RESPONSE_CODE)
if httpcode != 200:
print "Transfer to %s failed with code %d\n" %\
(h.getinfo(pycurl.EFFECTIVE_URL), httpcode)
raise RuntimeError
else:
print "Recd %d bytes from %s" % \
(h.getinfo(pycurl.SIZE_DOWNLOAD),
h.getinfo(pycurl.EFFECTIVE_URL))
cm.remove_handle(eh)
eh.reset()
eh.close()
cm.close()
outf.close()
pycurl.global_cleanup()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: %s <url>" % sys.argv[0]
sys.exit(2)
main()
|
Test for reset fixes refcount bug
|
Test for reset fixes refcount bug
|
Python
|
lgpl-2.1
|
jcharum/pycurl,ninemoreminutes/pycurl,ninemoreminutes/pycurl,jcharum/pycurl,jcharum/pycurl,ninemoreminutes/pycurl,ninemoreminutes/pycurl
|
Test for reset fixes refcount bug
|
#!/usr/bin/python
import sys
import pycurl
saw_error = 1
def main():
global saw_error
pycurl.global_init(pycurl.GLOBAL_DEFAULT)
outf = file("/dev/null", "rb+")
cm = pycurl.CurlMulti()
# Set multi handle's options
cm.setopt(pycurl.M_PIPELINING, 1)
eh = pycurl.Curl()
for x in range(1, 20):
eh.setopt(pycurl.WRITEDATA, outf)
eh.setopt(pycurl.URL, sys.argv[1])
cm.add_handle(eh)
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while active_handles:
ret = cm.select(1.0)
if ret == -1:
continue
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
count, good, bad = cm.info_read()
for h, en, em in bad:
print "Transfer to %s failed with %d, %s\n" % \
(h.getinfo(pycurl.EFFECTIVE_URL), en, em)
raise RuntimeError
for h in good:
httpcode = h.getinfo(pycurl.RESPONSE_CODE)
if httpcode != 200:
print "Transfer to %s failed with code %d\n" %\
(h.getinfo(pycurl.EFFECTIVE_URL), httpcode)
raise RuntimeError
else:
print "Recd %d bytes from %s" % \
(h.getinfo(pycurl.SIZE_DOWNLOAD),
h.getinfo(pycurl.EFFECTIVE_URL))
cm.remove_handle(eh)
eh.reset()
eh.close()
cm.close()
outf.close()
pycurl.global_cleanup()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: %s <url>" % sys.argv[0]
sys.exit(2)
main()
|
<commit_before><commit_msg>Test for reset fixes refcount bug<commit_after>
|
#!/usr/bin/python
import sys
import pycurl
saw_error = 1
def main():
global saw_error
pycurl.global_init(pycurl.GLOBAL_DEFAULT)
outf = file("/dev/null", "rb+")
cm = pycurl.CurlMulti()
# Set multi handle's options
cm.setopt(pycurl.M_PIPELINING, 1)
eh = pycurl.Curl()
for x in range(1, 20):
eh.setopt(pycurl.WRITEDATA, outf)
eh.setopt(pycurl.URL, sys.argv[1])
cm.add_handle(eh)
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while active_handles:
ret = cm.select(1.0)
if ret == -1:
continue
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
count, good, bad = cm.info_read()
for h, en, em in bad:
print "Transfer to %s failed with %d, %s\n" % \
(h.getinfo(pycurl.EFFECTIVE_URL), en, em)
raise RuntimeError
for h in good:
httpcode = h.getinfo(pycurl.RESPONSE_CODE)
if httpcode != 200:
print "Transfer to %s failed with code %d\n" %\
(h.getinfo(pycurl.EFFECTIVE_URL), httpcode)
raise RuntimeError
else:
print "Recd %d bytes from %s" % \
(h.getinfo(pycurl.SIZE_DOWNLOAD),
h.getinfo(pycurl.EFFECTIVE_URL))
cm.remove_handle(eh)
eh.reset()
eh.close()
cm.close()
outf.close()
pycurl.global_cleanup()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: %s <url>" % sys.argv[0]
sys.exit(2)
main()
|
Test for reset fixes refcount bug#!/usr/bin/python
import sys
import pycurl
saw_error = 1
def main():
global saw_error
pycurl.global_init(pycurl.GLOBAL_DEFAULT)
outf = file("/dev/null", "rb+")
cm = pycurl.CurlMulti()
# Set multi handle's options
cm.setopt(pycurl.M_PIPELINING, 1)
eh = pycurl.Curl()
for x in range(1, 20):
eh.setopt(pycurl.WRITEDATA, outf)
eh.setopt(pycurl.URL, sys.argv[1])
cm.add_handle(eh)
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while active_handles:
ret = cm.select(1.0)
if ret == -1:
continue
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
count, good, bad = cm.info_read()
for h, en, em in bad:
print "Transfer to %s failed with %d, %s\n" % \
(h.getinfo(pycurl.EFFECTIVE_URL), en, em)
raise RuntimeError
for h in good:
httpcode = h.getinfo(pycurl.RESPONSE_CODE)
if httpcode != 200:
print "Transfer to %s failed with code %d\n" %\
(h.getinfo(pycurl.EFFECTIVE_URL), httpcode)
raise RuntimeError
else:
print "Recd %d bytes from %s" % \
(h.getinfo(pycurl.SIZE_DOWNLOAD),
h.getinfo(pycurl.EFFECTIVE_URL))
cm.remove_handle(eh)
eh.reset()
eh.close()
cm.close()
outf.close()
pycurl.global_cleanup()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: %s <url>" % sys.argv[0]
sys.exit(2)
main()
|
<commit_before><commit_msg>Test for reset fixes refcount bug<commit_after>#!/usr/bin/python
import sys
import pycurl
saw_error = 1
def main():
global saw_error
pycurl.global_init(pycurl.GLOBAL_DEFAULT)
outf = file("/dev/null", "rb+")
cm = pycurl.CurlMulti()
# Set multi handle's options
cm.setopt(pycurl.M_PIPELINING, 1)
eh = pycurl.Curl()
for x in range(1, 20):
eh.setopt(pycurl.WRITEDATA, outf)
eh.setopt(pycurl.URL, sys.argv[1])
cm.add_handle(eh)
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while active_handles:
ret = cm.select(1.0)
if ret == -1:
continue
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
count, good, bad = cm.info_read()
for h, en, em in bad:
print "Transfer to %s failed with %d, %s\n" % \
(h.getinfo(pycurl.EFFECTIVE_URL), en, em)
raise RuntimeError
for h in good:
httpcode = h.getinfo(pycurl.RESPONSE_CODE)
if httpcode != 200:
print "Transfer to %s failed with code %d\n" %\
(h.getinfo(pycurl.EFFECTIVE_URL), httpcode)
raise RuntimeError
else:
print "Recd %d bytes from %s" % \
(h.getinfo(pycurl.SIZE_DOWNLOAD),
h.getinfo(pycurl.EFFECTIVE_URL))
cm.remove_handle(eh)
eh.reset()
eh.close()
cm.close()
outf.close()
pycurl.global_cleanup()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: %s <url>" % sys.argv[0]
sys.exit(2)
main()
|
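The interesting pattern in the test is that a single easy handle is reconfigured with reset() between transfers instead of being re-created; the commit message only says that reset() had a refcount bug, without further detail. A stripped-down illustration of that reuse pattern, using only standard pycurl calls (the URL is a placeholder):

# Minimal handle-reuse sketch; not part of the original test.
import pycurl
from io import BytesIO

eh = pycurl.Curl()
for _ in range(3):
    buf = BytesIO()
    eh.setopt(pycurl.URL, 'http://example.com/')
    eh.setopt(pycurl.WRITEDATA, buf)
    eh.perform()
    eh.reset()   # clear all options so the same handle can be reused
eh.close()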
|
b8db80eb446e20376cd24fda39f3bf2485e36371
|
tests/test_utils.py
|
tests/test_utils.py
|
def test_fact_mjd_conversion():
from aux2mongodb.utils import fact_mjd_to_datetime
timestamp = fact_mjd_to_datetime(16801.33)
assert timestamp.year == 2016
assert timestamp.month == 1
assert timestamp.day == 1
assert timestamp.hour == 7
assert timestamp.minute == 55
|
Add test for date conversion
|
Add test for date conversion
|
Python
|
mit
|
fact-project/aux2mongodb
|
Add test for date conversion
|
def test_fact_mjd_conversion():
from aux2mongodb.utils import fact_mjd_to_datetime
timestamp = fact_mjd_to_datetime(16801.33)
assert timestamp.year == 2016
assert timestamp.month == 1
assert timestamp.day == 1
assert timestamp.hour == 7
assert timestamp.minute == 55
|
<commit_before><commit_msg>Add test for date conversion<commit_after>
|
def test_fact_mjd_conversion():
from aux2mongodb.utils import fact_mjd_to_datetime
timestamp = fact_mjd_to_datetime(16801.33)
assert timestamp.year == 2016
assert timestamp.month == 1
assert timestamp.day == 1
assert timestamp.hour == 7
assert timestamp.minute == 55
|
Add test for date conversiondef test_fact_mjd_conversion():
from aux2mongodb.utils import fact_mjd_to_datetime
timestamp = fact_mjd_to_datetime(16801.33)
assert timestamp.year == 2016
assert timestamp.month == 1
assert timestamp.day == 1
assert timestamp.hour == 7
assert timestamp.minute == 55
|
<commit_before><commit_msg>Add test for date conversion<commit_after>def test_fact_mjd_conversion():
from aux2mongodb.utils import fact_mjd_to_datetime
timestamp = fact_mjd_to_datetime(16801.33)
assert timestamp.year == 2016
assert timestamp.month == 1
assert timestamp.day == 1
assert timestamp.hour == 7
assert timestamp.minute == 55
|
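The expected values imply that this "MJD" counts days since the Unix epoch: 16801 days after 1970-01-01 falls on 2016-01-01, and 0.33 of a day is 7 h 55.2 min. A minimal conversion consistent with the test (not necessarily the real aux2mongodb.utils implementation):

# Hypothetical implementation matching the expectations above.
from datetime import datetime, timedelta

def fact_mjd_to_datetime(fact_mjd):
    # days since 1970-01-01 00:00 UTC; the fractional part is the time of day
    return datetime(1970, 1, 1) + timedelta(days=fact_mjd)

ts = fact_mjd_to_datetime(16801.33)
assert (ts.year, ts.month, ts.day, ts.hour, ts.minute) == (2016, 1, 1, 7, 55)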
|
2af220f9d0a9d49c69d54ce1985ec586af9e473b
|
tools/stats/track_recall.py
|
tools/stats/track_recall.py
|
#!/usr/bin/env python
from vdetlib.utils.protocol import proto_load, proto_dump, track_box_at_frame
from vdetlib.utils.common import iou
import argparse
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('annot_file')
parser.add_argument('track_file')
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
annot_proto = proto_load(args.annot_file)
track_proto = proto_load(args.track_file)
gt_count = 0
recall_count = 0
for frame in vid_proto['frames']:
frame_id = frame['frame']
# annot boxes
annot_boxes = [track_box_at_frame(annot_track['track'], frame_id) \
for annot_track in annot_proto['annotations']]
annot_boxes = [box for box in annot_boxes if box is not None]
if len(annot_boxes) == 0: continue
gt_count += len(annot_boxes)
# track boxes
track_boxes = [track_box_at_frame(tracklet, frame_id) \
for tracklet in track_proto['tracks']]
track_boxes = [box for box in track_boxes if box is not None]
if len(track_boxes) == 0: continue
overlaps = iou(np.asarray(annot_boxes), np.asarray(track_boxes))
max_overlaps = overlaps.max(axis=1)
recall_count += np.count_nonzero(max_overlaps >= 0.5)
print "{} {} {} {}".format(vid_proto['video'],
gt_count, recall_count, float(recall_count) / gt_count)
|
Add a script to calculate track recalls.
|
Add a script to calculate track recalls.
|
Python
|
mit
|
myfavouritekk/TPN
|
Add a script to calculate track recalls.
|
#!/usr/bin/env python
from vdetlib.utils.protocol import proto_load, proto_dump, track_box_at_frame
from vdetlib.utils.common import iou
import argparse
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('annot_file')
parser.add_argument('track_file')
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
annot_proto = proto_load(args.annot_file)
track_proto = proto_load(args.track_file)
gt_count = 0
recall_count = 0
for frame in vid_proto['frames']:
frame_id = frame['frame']
# annot boxes
annot_boxes = [track_box_at_frame(annot_track['track'], frame_id) \
for annot_track in annot_proto['annotations']]
annot_boxes = [box for box in annot_boxes if box is not None]
if len(annot_boxes) == 0: continue
gt_count += len(annot_boxes)
# track boxes
track_boxes = [track_box_at_frame(tracklet, frame_id) \
for tracklet in track_proto['tracks']]
track_boxes = [box for box in track_boxes if box is not None]
if len(track_boxes) == 0: continue
overlaps = iou(np.asarray(annot_boxes), np.asarray(track_boxes))
max_overlaps = overlaps.max(axis=1)
recall_count += np.count_nonzero(max_overlaps >= 0.5)
print "{} {} {} {}".format(vid_proto['video'],
gt_count, recall_count, float(recall_count) / gt_count)
|
<commit_before><commit_msg>Add a script to calculate track recalls.<commit_after>
|
#!/usr/bin/env python
from vdetlib.utils.protocol import proto_load, proto_dump, track_box_at_frame
from vdetlib.utils.common import iou
import argparse
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('annot_file')
parser.add_argument('track_file')
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
annot_proto = proto_load(args.annot_file)
track_proto = proto_load(args.track_file)
gt_count = 0
recall_count = 0
for frame in vid_proto['frames']:
frame_id = frame['frame']
# annot boxes
annot_boxes = [track_box_at_frame(annot_track['track'], frame_id) \
for annot_track in annot_proto['annotations']]
annot_boxes = [box for box in annot_boxes if box is not None]
if len(annot_boxes) == 0: continue
gt_count += len(annot_boxes)
# track boxes
track_boxes = [track_box_at_frame(tracklet, frame_id) \
for tracklet in track_proto['tracks']]
track_boxes = [box for box in track_boxes if box is not None]
if len(track_boxes) == 0: continue
overlaps = iou(np.asarray(annot_boxes), np.asarray(track_boxes))
max_overlaps = overlaps.max(axis=1)
recall_count += np.count_nonzero(max_overlaps >= 0.5)
print "{} {} {} {}".format(vid_proto['video'],
gt_count, recall_count, float(recall_count) / gt_count)
|
Add a script to calculate track recalls.#!/usr/bin/env python
from vdetlib.utils.protocol import proto_load, proto_dump, track_box_at_frame
from vdetlib.utils.common import iou
import argparse
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('annot_file')
parser.add_argument('track_file')
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
annot_proto = proto_load(args.annot_file)
track_proto = proto_load(args.track_file)
gt_count = 0
recall_count = 0
for frame in vid_proto['frames']:
frame_id = frame['frame']
# annot boxes
annot_boxes = [track_box_at_frame(annot_track['track'], frame_id) \
for annot_track in annot_proto['annotations']]
annot_boxes = [box for box in annot_boxes if box is not None]
if len(annot_boxes) == 0: continue
gt_count += len(annot_boxes)
# track boxes
track_boxes = [track_box_at_frame(tracklet, frame_id) \
for tracklet in track_proto['tracks']]
track_boxes = [box for box in track_boxes if box is not None]
if len(track_boxes) == 0: continue
overlaps = iou(np.asarray(annot_boxes), np.asarray(track_boxes))
max_overlaps = overlaps.max(axis=1)
recall_count += np.count_nonzero(max_overlaps >= 0.5)
print "{} {} {} {}".format(vid_proto['video'],
gt_count, recall_count, float(recall_count) / gt_count)
|
<commit_before><commit_msg>Add a script to calculate track recalls.<commit_after>#!/usr/bin/env python
from vdetlib.utils.protocol import proto_load, proto_dump, track_box_at_frame
from vdetlib.utils.common import iou
import argparse
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('annot_file')
parser.add_argument('track_file')
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
annot_proto = proto_load(args.annot_file)
track_proto = proto_load(args.track_file)
gt_count = 0
recall_count = 0
for frame in vid_proto['frames']:
frame_id = frame['frame']
# annot boxes
annot_boxes = [track_box_at_frame(annot_track['track'], frame_id) \
for annot_track in annot_proto['annotations']]
annot_boxes = [box for box in annot_boxes if box is not None]
if len(annot_boxes) == 0: continue
gt_count += len(annot_boxes)
# track boxes
track_boxes = [track_box_at_frame(tracklet, frame_id) \
for tracklet in track_proto['tracks']]
track_boxes = [box for box in track_boxes if box is not None]
if len(track_boxes) == 0: continue
overlaps = iou(np.asarray(annot_boxes), np.asarray(track_boxes))
max_overlaps = overlaps.max(axis=1)
recall_count += np.count_nonzero(max_overlaps >= 0.5)
print "{} {} {} {}".format(vid_proto['video'],
gt_count, recall_count, float(recall_count) / gt_count)
|
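The recall criterion hinges on vdetlib's iou helper: a ground-truth box counts as recalled when its best overlap with any track box is at least 0.5. Below is a minimal pairwise IoU in that spirit, assuming [x1, y1, x2, y2] boxes; the exact vdetlib convention (for example whether widths include a +1) is not shown in this commit.

# Hypothetical pairwise IoU; returns an (N, M) matrix so overlaps.max(axis=1)
# gives each ground-truth box its best-matching track box.
import numpy as np

def iou(boxes_a, boxes_b):
    x1 = np.maximum(boxes_a[:, None, 0], boxes_b[None, :, 0])
    y1 = np.maximum(boxes_a[:, None, 1], boxes_b[None, :, 1])
    x2 = np.minimum(boxes_a[:, None, 2], boxes_b[None, :, 2])
    y2 = np.minimum(boxes_a[:, None, 3], boxes_b[None, :, 3])
    inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
    area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1])
    return inter / (area_a[:, None] + area_b[None, :] - inter)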
|
58870974c218a2b4dfc0a53c17af50138d90a8b2
|
send_email_using_smtp.py
|
send_email_using_smtp.py
|
from email.mime.text import MIMEText
from smtplib import SMTP
import logging
#from settings import EMAIL_FROM, EMAIL_MSG, EMAIL_TO, SERVER
EMAIL_FROM = 'me@example.com'
EMAIL_MSG = 'Hi friend!'
EMAIL_SUBJECT = 'Hi'
EMAIL_TO = 'friend@example.com'
SERVER = 'smtp.example.com'
if __name__ == '__main__':
msg = MIMEText(EMAIL_MSG)
msg['Subject'] = EMAIL_SUBJECT
msg['From'] = EMAIL_FROM
msg['To'] = EMAIL_TO
with SMTP(SERVER) as smtp:
#smtp.sendmail(EMAIL_FROM, EMAIL_TO, EMAIL_MSG)
smtp.send_message(msg)
|
Add send email using SMTP example
|
Add send email using SMTP example
|
Python
|
mit
|
MattMS/Python_3_examples
|
Add send email using SMTP example
|
from email.mime.text import MIMEText
from smtplib import SMTP
import logging
#from settings import EMAIL_FROM, EMAIL_MSG, EMAIL_TO, SERVER
EMAIL_FROM = 'me@example.com'
EMAIL_MSG = 'Hi friend!'
EMAIL_SUBJECT = 'Hi'
EMAIL_TO = 'friend@example.com'
SERVER = 'smtp.example.com'
if __name__ == '__main__':
msg = MIMEText(EMAIL_MSG)
msg['Subject'] = EMAIL_SUBJECT
msg['From'] = EMAIL_FROM
msg['To'] = EMAIL_TO
with SMTP(SERVER) as smtp:
#smtp.sendmail(EMAIL_FROM, EMAIL_TO, EMAIL_MSG)
smtp.send_message(msg)
|
<commit_before><commit_msg>Add send email using SMTP example<commit_after>
|
from email.mime.text import MIMEText
from smtplib import SMTP
import logging
#from settings import EMAIL_FROM, EMAIL_MSG, EMAIL_TO, SERVER
EMAIL_FROM = 'me@example.com'
EMAIL_MSG = 'Hi friend!'
EMAIL_SUBJECT = 'Hi'
EMAIL_TO = 'friend@example.com'
SERVER = 'smtp.example.com'
if __name__ == '__main__':
msg = MIMEText(EMAIL_MSG)
msg['Subject'] = EMAIL_SUBJECT
msg['From'] = EMAIL_FROM
msg['To'] = EMAIL_TO
with SMTP(SERVER) as smtp:
#smtp.sendmail(EMAIL_FROM, EMAIL_TO, EMAIL_MSG)
smtp.send_message(msg)
|
Add send email using SMTP examplefrom email.mime.text import MIMEText
from smtplib import SMTP
import logging
#from settings import EMAIL_FROM, EMAIL_MSG, EMAIL_TO, SERVER
EMAIL_FROM = 'me@example.com'
EMAIL_MSG = 'Hi friend!'
EMAIL_SUBJECT = 'Hi'
EMAIL_TO = 'friend@example.com'
SERVER = 'smtp.example.com'
if __name__ == '__main__':
msg = MIMEText(EMAIL_MSG)
msg['Subject'] = EMAIL_SUBJECT
msg['From'] = EMAIL_FROM
msg['To'] = EMAIL_TO
with SMTP(SERVER) as smtp:
#smtp.sendmail(EMAIL_FROM, EMAIL_TO, EMAIL_MSG)
smtp.send_message(msg)
|
<commit_before><commit_msg>Add send email using SMTP example<commit_after>from email.mime.text import MIMEText
from smtplib import SMTP
import logging
#from settings import EMAIL_FROM, EMAIL_MSG, EMAIL_TO, SERVER
EMAIL_FROM = 'me@example.com'
EMAIL_MSG = 'Hi friend!'
EMAIL_SUBJECT = 'Hi'
EMAIL_TO = 'friend@example.com'
SERVER = 'smtp.example.com'
if __name__ == '__main__':
msg = MIMEText(EMAIL_MSG)
msg['Subject'] = EMAIL_SUBJECT
msg['From'] = EMAIL_FROM
msg['To'] = EMAIL_TO
with SMTP(SERVER) as smtp:
#smtp.sendmail(EMAIL_FROM, EMAIL_TO, EMAIL_MSG)
smtp.send_message(msg)
|
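The example assumes a server that accepts unauthenticated submissions on the default port; most real servers require STARTTLS and a login. A hedged variant using only standard smtplib calls (all values are placeholders):

from email.mime.text import MIMEText
from smtplib import SMTP

msg = MIMEText('Hi friend!')
msg['Subject'] = 'Hi'
msg['From'] = 'me@example.com'
msg['To'] = 'friend@example.com'

with SMTP('smtp.example.com', 587) as smtp:
    smtp.starttls()                                  # upgrade the connection to TLS
    smtp.login('me@example.com', 'app-password')     # authenticate before sending
    smtp.send_message(msg)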
|
5725fc0c5cd8acc22e332be10e43e32de601bc95
|
scripts/get_bank_registry_pl.py
|
scripts/get_bank_registry_pl.py
|
import json
import csv
import requests
URL = "https://ewib.nbp.pl/plewibnra?dokNazwa=plewibnra.txt"
def process():
registry = []
with requests.get(URL, stream=True) as txtfile:
for row in txtfile.iter_lines():
if len(row.decode("latin1").split("\t")) != 33:
continue
else:
registry.append(
{
"country_code": "PL",
"primary": True,
"bic": row.decode("latin1").split("\t")[19].strip().upper(),
"bank_code": row.decode("latin1").split("\t")[4].strip(),
"name": row.decode("latin1").split("\t")[1].strip(),
"short_name": row.decode("latin1").split("\t")[1].strip(),
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_pl.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
Create script to generate PL bank registry
|
Create script to generate PL bank registry
|
Python
|
mit
|
figo-connect/schwifty
|
Create script to generate PL bank registry
|
import json
import csv
import requests
URL = "https://ewib.nbp.pl/plewibnra?dokNazwa=plewibnra.txt"
def process():
registry = []
with requests.get(URL, stream=True) as txtfile:
for row in txtfile.iter_lines():
if len(row.decode("latin1").split("\t")) != 33:
continue
else:
registry.append(
{
"country_code": "PL",
"primary": True,
"bic": row.decode("latin1").split("\t")[19].strip().upper(),
"bank_code": row.decode("latin1").split("\t")[4].strip(),
"name": row.decode("latin1").split("\t")[1].strip(),
"short_name": row.decode("latin1").split("\t")[1].strip(),
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_pl.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
<commit_before><commit_msg>Create script to generate PL bank registry<commit_after>
|
import json
import csv
import requests
URL = "https://ewib.nbp.pl/plewibnra?dokNazwa=plewibnra.txt"
def process():
registry = []
with requests.get(URL, stream=True) as txtfile:
for row in txtfile.iter_lines():
if len(row.decode("latin1").split("\t")) != 33:
continue
else:
registry.append(
{
"country_code": "PL",
"primary": True,
"bic": row.decode("latin1").split("\t")[19].strip().upper(),
"bank_code": row.decode("latin1").split("\t")[4].strip(),
"name": row.decode("latin1").split("\t")[1].strip(),
"short_name": row.decode("latin1").split("\t")[1].strip(),
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_pl.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
Create script to generate PL bank registryimport json
import csv
import requests
URL = "https://ewib.nbp.pl/plewibnra?dokNazwa=plewibnra.txt"
def process():
registry = []
with requests.get(URL, stream=True) as txtfile:
for row in txtfile.iter_lines():
if len(row.decode("latin1").split("\t")) != 33:
continue
else:
registry.append(
{
"country_code": "PL",
"primary": True,
"bic": row.decode("latin1").split("\t")[19].strip().upper(),
"bank_code": row.decode("latin1").split("\t")[4].strip(),
"name": row.decode("latin1").split("\t")[1].strip(),
"short_name": row.decode("latin1").split("\t")[1].strip(),
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_pl.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
<commit_before><commit_msg>Create script to generate PL bank registry<commit_after>import json
import csv
import requests
URL = "https://ewib.nbp.pl/plewibnra?dokNazwa=plewibnra.txt"
def process():
registry = []
with requests.get(URL, stream=True) as txtfile:
for row in txtfile.iter_lines():
if len(row.decode("latin1").split("\t")) != 33:
continue
else:
registry.append(
{
"country_code": "PL",
"primary": True,
"bic": row.decode("latin1").split("\t")[19].strip().upper(),
"bank_code": row.decode("latin1").split("\t")[4].strip(),
"name": row.decode("latin1").split("\t")[1].strip(),
"short_name": row.decode("latin1").split("\t")[1].strip(),
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_pl.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
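For reference, each element of the generated JSON list has the shape below; the values are made up for illustration and do not come from the NBP export.

# Illustrative shape of one generated entry:
entry = {
    "country_code": "PL",
    "primary": True,
    "bic": "EXAMPLPWXXX",
    "bank_code": "10100000",
    "name": "Example Bank S.A.",
    "short_name": "Example Bank S.A.",
}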
|
4ab4bfedbecd70be183b1785562b3b8a97f8c50a
|
tests/learn/dl/test_models.py
|
tests/learn/dl/test_models.py
|
import sys
import pytest
from numpy.testing import assert_equal
import torch
sys.path.append("../../../")
from pycroscopy.learn import models
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_output(dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, 2, [1, 1])
out = ae(x)
assert_equal(input_dim, out.shape[1:])
@pytest.mark.parametrize("zdim", [1, 2, 5])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_encoding(zdim, dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, zdim, [1, 1])
z = ae.encode(x)
assert_equal(zdim, z.shape[-1])
@pytest.mark.parametrize("zdim", [1, 2, 5])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_decoding(zdim, dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, zdim, [1, 1])
z = torch.randn(zdim)
x_ = ae.decode(z)
assert_equal(x_.shape[1:], x.shape[1:])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_decode_grid(dim, size):
input_dim = (1, *size)
grid_spacing = 4
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, 2, [1, 1])
grid = ae.decode_grid(grid_spacing)
assert_equal(grid.shape[0], grid_spacing**2)
assert_equal(grid.shape[1:], x.shape[2:])
|
Add tests for autoencoder models
|
Add tests for autoencoder models
|
Python
|
mit
|
pycroscopy/pycroscopy
|
Add tests for autoencoder models
|
import sys
import pytest
from numpy.testing import assert_equal
import torch
sys.path.append("../../../")
from pycroscopy.learn import models
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_output(dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, 2, [1, 1])
out = ae(x)
assert_equal(input_dim, out.shape[1:])
@pytest.mark.parametrize("zdim", [1, 2, 5])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_encoding(zdim, dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, zdim, [1, 1])
z = ae.encode(x)
assert_equal(zdim, z.shape[-1])
@pytest.mark.parametrize("zdim", [1, 2, 5])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_decoding(zdim, dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, zdim, [1, 1])
z = torch.randn(zdim)
x_ = ae.decode(z)
assert_equal(x_.shape[1:], x.shape[1:])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_decode_grid(dim, size):
input_dim = (1, *size)
grid_spacing = 4
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, 2, [1, 1])
grid = ae.decode_grid(grid_spacing)
assert_equal(grid.shape[0], grid_spacing**2)
assert_equal(grid.shape[1:], x.shape[2:])
|
<commit_before><commit_msg>Add tests for autoencoder models<commit_after>
|
import sys
import pytest
from numpy.testing import assert_equal
import torch
sys.path.append("../../../")
from pycroscopy.learn import models
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_output(dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, 2, [1, 1])
out = ae(x)
assert_equal(input_dim, out.shape[1:])
@pytest.mark.parametrize("zdim", [1, 2, 5])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_encoding(zdim, dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, zdim, [1, 1])
z = ae.encode(x)
assert_equal(zdim, z.shape[-1])
@pytest.mark.parametrize("zdim", [1, 2, 5])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_decoding(zdim, dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, zdim, [1, 1])
z = torch.randn(zdim)
x_ = ae.decode(z)
assert_equal(x_.shape[1:], x.shape[1:])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_decode_grid(dim, size):
input_dim = (1, *size)
grid_spacing = 4
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, 2, [1, 1])
grid = ae.decode_grid(grid_spacing)
assert_equal(grid.shape[0], grid_spacing**2)
assert_equal(grid.shape[1:], x.shape[2:])
|
Add tests for autoencoder modelsimport sys
import pytest
from numpy.testing import assert_equal
import torch
sys.path.append("../../../")
from pycroscopy.learn import models
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_output(dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, 2, [1, 1])
out = ae(x)
assert_equal(input_dim, out.shape[1:])
@pytest.mark.parametrize("zdim", [1, 2, 5])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_encoding(zdim, dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, zdim, [1, 1])
z = ae.encode(x)
assert_equal(zdim, z.shape[-1])
@pytest.mark.parametrize("zdim", [1, 2, 5])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_decoding(zdim, dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, zdim, [1, 1])
z = torch.randn(zdim)
x_ = ae.decode(z)
assert_equal(x_.shape[1:], x.shape[1:])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_decode_grid(dim, size):
input_dim = (1, *size)
grid_spacing = 4
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, 2, [1, 1])
grid = ae.decode_grid(grid_spacing)
assert_equal(grid.shape[0], grid_spacing**2)
assert_equal(grid.shape[1:], x.shape[2:])
|
<commit_before><commit_msg>Add tests for autoencoder models<commit_after>import sys
import pytest
from numpy.testing import assert_equal
import torch
sys.path.append("../../../")
from pycroscopy.learn import models
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_output(dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, 2, [1, 1])
out = ae(x)
assert_equal(input_dim, out.shape[1:])
@pytest.mark.parametrize("zdim", [1, 2, 5])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_encoding(zdim, dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, zdim, [1, 1])
z = ae.encode(x)
assert_equal(zdim, z.shape[-1])
@pytest.mark.parametrize("zdim", [1, 2, 5])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_decoding(zdim, dim, size):
input_dim = (1, *size)
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, zdim, [1, 1])
z = torch.randn(zdim)
x_ = ae.decode(z)
assert_equal(x_.shape[1:], x.shape[1:])
@pytest.mark.parametrize("dim, size", [(1, [8]), (2, [8, 8]), (3, [8, 8, 8])])
def test_autoencoder_decode_grid(dim, size):
input_dim = (1, *size)
grid_spacing = 4
x = torch.randn(2, *input_dim)
ae = models.AutoEncoder(input_dim, 2, [1, 1])
grid = ae.decode_grid(grid_spacing)
assert_equal(grid.shape[0], grid_spacing**2)
assert_equal(grid.shape[1:], x.shape[2:])
|
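The tests only exercise an interface: a constructor taking (input_dim, latent_dim, layers_per_block) plus forward, encode, decode and decode_grid with the shape contracts asserted above. The stand-in below satisfies exactly those checks; it is deliberately tiny (fully connected, ignores the layer counts, and decode_grid assumes a 2-D latent space) and is not pycroscopy's actual model.

# Minimal stand-in consistent with the tests above; not the real AutoEncoder.
import torch
import torch.nn as nn

class AutoEncoderSketch(nn.Module):
    def __init__(self, input_dim, latent_dim, layers_per_block):
        super().__init__()
        self.input_dim = tuple(input_dim)           # e.g. (1, 8, 8)
        flat = 1
        for d in self.input_dim:
            flat *= d
        self.encoder = nn.Linear(flat, latent_dim)  # layers_per_block ignored here
        self.decoder = nn.Linear(latent_dim, flat)

    def encode(self, x):
        return self.encoder(x.flatten(1))           # (batch, latent_dim)

    def decode(self, z):
        if z.dim() == 1:
            z = z.unsqueeze(0)
        return self.decoder(z).view(-1, *self.input_dim)

    def forward(self, x):
        return self.decode(self.encode(x))          # same shape as the input

    def decode_grid(self, grid_spacing):
        # regular grid over a 2-D latent space -> (grid_spacing**2, *spatial_dims)
        lin = torch.linspace(-1, 1, grid_spacing)
        zz = torch.cartesian_prod(lin, lin)
        return self.decode(zz).squeeze(1)           # drop the channel dimension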
|
470998b97ea5c6cf5ed37ab4e6fd4dcf72e2888a
|
interface_import.py
|
interface_import.py
|
__version__ = '0.9'
__author__ = 'Remi Batist'
# Importing interface-settings from pre-defined csv-file
# used row format shown in the example below
# csv delimiter ' ; '
# interface description linktype permitvlan pvid
# GigabitEthernet1/0/21 server-1 access 23
# GigabitEthernet1/0/22 server-2 trunk 10 12 10
#### Importing python modules
import csv
import comware
import os
import sys
import termios
#### File input function
fd = sys.stdin.fileno();
new = termios.tcgetattr(fd)
new[3] = new[3] | termios.ICANON | termios.ECHO
new[6] [termios.VMIN] = 1
new[6] [termios.VTIME] = 0
termios.tcsetattr(fd, termios.TCSANOW, new)
termios.tcsendbreak(fd,0)
print ''
file_in = raw_input(" csv-file to import: ")
#### Importing rows
item_in_1 = 'interface'
item_in_2 = 'description'
item_in_3 = 'linktype'
item_in_4 = 'permitvlan'
item_in_5 = 'pvid'
#### Open file
with open(file_in,'r') as f:
reader = csv.DictReader(f, delimiter=';')
rows = list(reader)
#### Reading file
for row in rows:
#### Setting link-type
if row[item_in_3] == 'access':
linktype = 'port link-type access'
set_pvid = 'port access vlan '
set_permit =''
else:
linktype = 'port link-type trunk'
set_pvid = 'port trunk pvid vlan '
set_permit ='port trunk permit vlan '
#### Deploying settings
print ''
print 'Deploying settings...'
print ''
strcli = "system ;%s ;%s ;%s ;%s ;%s" % ('interface '+row[item_in_1], 'description ' +row[item_in_2], linktype, set_pvid + row[item_in_5], set_permit + row[item_in_4])
comware.CLI(strcli)
|
Set interface-config from pre-defined csv-file
|
Set interface-config from pre-defined csv-file
|
Python
|
mit
|
rbatist/HPN-Scripting,networkingdvi/HPN-Scripting
|
Set interface-config from pre-defined csv-file
|
__version__ = '0.9'
__author__ = 'Remi Batist'
# Importing interface-settings from pre-defined csv-file
# used row format shown in the example below
# csv delimiter ' ; '
# interface description linktype permitvlan pvid
# GigabitEthernet1/0/21 server-1 access 23
# GigabitEthernet1/0/22 server-2 trunk 10 12 10
#### Importing python modules
import csv
import comware
import os
import sys
import termios
#### File input function
fd = sys.stdin.fileno();
new = termios.tcgetattr(fd)
new[3] = new[3] | termios.ICANON | termios.ECHO
new[6] [termios.VMIN] = 1
new[6] [termios.VTIME] = 0
termios.tcsetattr(fd, termios.TCSANOW, new)
termios.tcsendbreak(fd,0)
print ''
file_in = raw_input(" csv-file to import: ")
#### Importing rows
item_in_1 = 'interface'
item_in_2 = 'description'
item_in_3 = 'linktype'
item_in_4 = 'permitvlan'
item_in_5 = 'pvid'
#### Open file
with open(file_in,'r') as f:
reader = csv.DictReader(f, delimiter=';')
rows = list(reader)
#### Reading file
for row in rows:
#### Setting link-type
if row[item_in_3] == 'access':
linktype = 'port link-type access'
set_pvid = 'port access vlan '
set_permit =''
else:
linktype = 'port link-type trunk'
set_pvid = 'port trunk pvid vlan '
set_permit ='port trunk permit vlan '
#### Deploying settings
print ''
print 'Deploying settings...'
print ''
strcli = "system ;%s ;%s ;%s ;%s ;%s" % ('interface '+row[item_in_1], 'description ' +row[item_in_2], linktype, set_pvid + row[item_in_5], set_permit + row[item_in_4])
comware.CLI(strcli)
|
<commit_before><commit_msg>Set interface-config from pre-defined csv-file<commit_after>
|
__version__ = '0.9'
__author__ = 'Remi Batist'
# Importing interface-settings from pre-defined csv-file
# used row format shown in the example below
# csv delimiter ' ; '
# interface description linktype permitvlan pvid
# GigabitEthernet1/0/21 server-1 access 23
# GigabitEthernet1/0/22 server-2 trunk 10 12 10
#### Importing python modules
import csv
import comware
import os
import sys
import termios
#### File input function
fd = sys.stdin.fileno();
new = termios.tcgetattr(fd)
new[3] = new[3] | termios.ICANON | termios.ECHO
new[6] [termios.VMIN] = 1
new[6] [termios.VTIME] = 0
termios.tcsetattr(fd, termios.TCSANOW, new)
termios.tcsendbreak(fd,0)
print ''
file_in = raw_input(" csv-file to import: ")
#### Importing rows
item_in_1 = 'interface'
item_in_2 = 'description'
item_in_3 = 'linktype'
item_in_4 = 'permitvlan'
item_in_5 = 'pvid'
#### Open file
with open(file_in,'r') as f:
reader = csv.DictReader(f, delimiter=';')
rows = list(reader)
#### Reading file
for row in rows:
#### Setting link-type
if row[item_in_3] == 'access':
linktype = 'port link-type access'
set_pvid = 'port access vlan '
set_permit =''
else:
linktype = 'port link-type trunk'
set_pvid = 'port trunk pvid vlan '
set_permit ='port trunk permit vlan '
#### Deploying settings
print ''
print 'Deploying settings...'
print ''
strcli = "system ;%s ;%s ;%s ;%s ;%s" % ('interface '+row[item_in_1], 'description ' +row[item_in_2], linktype, set_pvid + row[item_in_5], set_permit + row[item_in_4])
comware.CLI(strcli)
|
Set interface-config from pre-defined csv-file
__version__ = '0.9'
__author__ = 'Remi Batist'
# Importing interface-settings from pre-defined csv-file
# used row format shown in the example below
# csv delimiter ' ; '
# interface description linktype permitvlan pvid
# GigabitEthernet1/0/21 server-1 access 23
# GigabitEthernet1/0/22 server-2 trunk 10 12 10
#### Importing python modules
import csv
import comware
import os
import sys
import termios
#### File input function
fd = sys.stdin.fileno();
new = termios.tcgetattr(fd)
new[3] = new[3] | termios.ICANON | termios.ECHO
new[6] [termios.VMIN] = 1
new[6] [termios.VTIME] = 0
termios.tcsetattr(fd, termios.TCSANOW, new)
termios.tcsendbreak(fd,0)
print ''
file_in = raw_input(" csv-file to import: ")
#### Importing rows
item_in_1 = 'interface'
item_in_2 = 'description'
item_in_3 = 'linktype'
item_in_4 = 'permitvlan'
item_in_5 = 'pvid'
#### Open file
with open(file_in,'r') as f:
reader = csv.DictReader(f, delimiter=';')
rows = list(reader)
#### Reading file
for row in rows:
#### Setting link-type
if row[item_in_3] == 'access':
linktype = 'port link-type access'
set_pvid = 'port access vlan '
set_permit =''
else:
linktype = 'port link-type trunk'
set_pvid = 'port trunk pvid vlan '
set_permit ='port trunk permit vlan '
#### Deploying settings
print ''
print 'Deploying settings...'
print ''
strcli = "system ;%s ;%s ;%s ;%s ;%s" % ('interface '+row[item_in_1], 'description ' +row[item_in_2], linktype, set_pvid + row[item_in_5], set_permit + row[item_in_4])
comware.CLI(strcli)
|
<commit_before><commit_msg>Set interface-config from pre-defined csv-file<commit_after>
__version__ = '0.9'
__author__ = 'Remi Batist'
# Importing interface-settings from pre-defined csv-file
# used row format shown in the example below
# csv delimiter ' ; '
# interface description linktype permitvlan pvid
# GigabitEthernet1/0/21 server-1 access 23
# GigabitEthernet1/0/22 server-2 trunk 10 12 10
#### Importing python modules
import csv
import comware
import os
import sys
import termios
#### File input function
fd = sys.stdin.fileno();
new = termios.tcgetattr(fd)
new[3] = new[3] | termios.ICANON | termios.ECHO
new[6] [termios.VMIN] = 1
new[6] [termios.VTIME] = 0
termios.tcsetattr(fd, termios.TCSANOW, new)
termios.tcsendbreak(fd,0)
print ''
file_in = raw_input(" csv-file to import: ")
#### Importing rows
item_in_1 = 'interface'
item_in_2 = 'description'
item_in_3 = 'linktype'
item_in_4 = 'permitvlan'
item_in_5 = 'pvid'
#### Open file
with open(file_in,'r') as f:
reader = csv.DictReader(f, delimiter=';')
rows = list(reader)
#### Reading file
for row in rows:
#### Setting link-type
if row[item_in_3] == 'access':
linktype = 'port link-type access'
set_pvid = 'port access vlan '
set_permit =''
else:
linktype = 'port link-type trunk'
set_pvid = 'port trunk pvid vlan '
set_permit ='port trunk permit vlan '
#### Deploying settings
print ''
print 'Deploying settings...'
print ''
strcli = "system ;%s ;%s ;%s ;%s ;%s" % ('interface '+row[item_in_1], 'description ' +row[item_in_2], linktype, set_pvid + row[item_in_5], set_permit + row[item_in_4])
comware.CLI(strcli)
|
|
b1fdbd1d256c7cac8c5e79f05af5e514974d3ef2
|
tests/test_web_application.py
|
tests/test_web_application.py
|
import asyncio
import pytest
from aiohttp import web, log
from unittest import mock
def test_app_ctor(loop):
app = web.Application(loop=loop)
assert loop is app.loop
assert app.logger is log.web_logger
def test_app_call(loop):
app = web.Application(loop=loop)
assert app is app()
def test_app_default_loop(loop):
asyncio.set_event_loop(loop)
app = web.Application()
assert loop is app.loop
@pytest.mark.run_loop
def test_app_register_on_finish(loop):
app = web.Application(loop=loop)
cb1 = mock.Mock()
cb2 = mock.Mock()
app.register_on_finish(cb1, 1, b=2)
app.register_on_finish(cb2, 2, c=3)
yield from app.finish()
cb1.assert_called_once_with(app, 1, b=2)
cb2.assert_called_once_with(app, 2, c=3)
@pytest.mark.run_loop
def test_app_register_coro(loop):
app = web.Application(loop=loop)
fut = asyncio.Future(loop=loop)
@asyncio.coroutine
def cb(app):
yield from asyncio.sleep(0.001, loop=loop)
fut.set_result(123)
app.register_on_finish(cb)
yield from app.finish()
assert fut.done()
assert 123 == fut.result()
@pytest.mark.run_loop
def test_app_error_in_finish_callbacks(loop):
app = web.Application(loop=loop)
err = RuntimeError('bad call')
app.register_on_finish(mock.Mock(side_effect=err))
handler = mock.Mock()
loop.set_exception_handler(handler)
yield from app.finish()
exc_info = {'exception': err,
'application': app,
'message': 'Error in finish callback'}
handler.assert_called_once_with(loop, exc_info)
def test_non_default_router(loop):
router = web.UrlDispatcher()
app = web.Application(loop=loop, router=router)
assert router is app.router
def test_logging(self):
logger = mock.Mock()
app = web.Application(loop=self.loop)
app.logger = logger
self.assertIs(app.logger, logger)
|
Convert web.Application tests to pytest style
|
Convert web.Application tests to pytest style
|
Python
|
apache-2.0
|
mind1master/aiohttp,decentfox/aiohttp,panda73111/aiohttp,rutsky/aiohttp,arthurdarcet/aiohttp,Eyepea/aiohttp,alex-eri/aiohttp-1,singulared/aiohttp,elastic-coders/aiohttp,mind1master/aiohttp,esaezgil/aiohttp,arthurdarcet/aiohttp,vaskalas/aiohttp,z2v/aiohttp,esaezgil/aiohttp,Insoleet/aiohttp,elastic-coders/aiohttp,jettify/aiohttp,jashandeep-sohi/aiohttp,esaezgil/aiohttp,panda73111/aiohttp,juliatem/aiohttp,KeepSafe/aiohttp,jashandeep-sohi/aiohttp,KeepSafe/aiohttp,z2v/aiohttp,alex-eri/aiohttp-1,decentfox/aiohttp,rutsky/aiohttp,moden-py/aiohttp,moden-py/aiohttp,KeepSafe/aiohttp,moden-py/aiohttp,jettify/aiohttp,hellysmile/aiohttp,vaskalas/aiohttp,vaskalas/aiohttp,singulared/aiohttp,AraHaanOrg/aiohttp,elastic-coders/aiohttp,singulared/aiohttp,alex-eri/aiohttp-1,pfreixes/aiohttp,jettify/aiohttp,pfreixes/aiohttp,panda73111/aiohttp,juliatem/aiohttp,decentfox/aiohttp,jashandeep-sohi/aiohttp,hellysmile/aiohttp,mind1master/aiohttp,z2v/aiohttp,arthurdarcet/aiohttp,playpauseandstop/aiohttp,rutsky/aiohttp,AraHaanOrg/aiohttp
|
Convert web.Application tests to pytest style
|
import asyncio
import pytest
from aiohttp import web, log
from unittest import mock
def test_app_ctor(loop):
app = web.Application(loop=loop)
assert loop is app.loop
assert app.logger is log.web_logger
def test_app_call(loop):
app = web.Application(loop=loop)
assert app is app()
def test_app_default_loop(loop):
asyncio.set_event_loop(loop)
app = web.Application()
assert loop is app.loop
@pytest.mark.run_loop
def test_app_register_on_finish(loop):
app = web.Application(loop=loop)
cb1 = mock.Mock()
cb2 = mock.Mock()
app.register_on_finish(cb1, 1, b=2)
app.register_on_finish(cb2, 2, c=3)
yield from app.finish()
cb1.assert_called_once_with(app, 1, b=2)
cb2.assert_called_once_with(app, 2, c=3)
@pytest.mark.run_loop
def test_app_register_coro(loop):
app = web.Application(loop=loop)
fut = asyncio.Future(loop=loop)
@asyncio.coroutine
def cb(app):
yield from asyncio.sleep(0.001, loop=loop)
fut.set_result(123)
app.register_on_finish(cb)
yield from app.finish()
assert fut.done()
assert 123 == fut.result()
@pytest.mark.run_loop
def test_app_error_in_finish_callbacks(loop):
app = web.Application(loop=loop)
err = RuntimeError('bad call')
app.register_on_finish(mock.Mock(side_effect=err))
handler = mock.Mock()
loop.set_exception_handler(handler)
yield from app.finish()
exc_info = {'exception': err,
'application': app,
'message': 'Error in finish callback'}
handler.assert_called_once_with(loop, exc_info)
def test_non_default_router(loop):
router = web.UrlDispatcher()
app = web.Application(loop=loop, router=router)
assert router is app.router
def test_logging(self):
logger = mock.Mock()
app = web.Application(loop=self.loop)
app.logger = logger
self.assertIs(app.logger, logger)
|
<commit_before><commit_msg>Convert web.Application tests to pytest style<commit_after>
|
import asyncio
import pytest
from aiohttp import web, log
from unittest import mock
def test_app_ctor(loop):
app = web.Application(loop=loop)
assert loop is app.loop
assert app.logger is log.web_logger
def test_app_call(loop):
app = web.Application(loop=loop)
assert app is app()
def test_app_default_loop(loop):
asyncio.set_event_loop(loop)
app = web.Application()
assert loop is app.loop
@pytest.mark.run_loop
def test_app_register_on_finish(loop):
app = web.Application(loop=loop)
cb1 = mock.Mock()
cb2 = mock.Mock()
app.register_on_finish(cb1, 1, b=2)
app.register_on_finish(cb2, 2, c=3)
yield from app.finish()
cb1.assert_called_once_with(app, 1, b=2)
cb2.assert_called_once_with(app, 2, c=3)
@pytest.mark.run_loop
def test_app_register_coro(loop):
app = web.Application(loop=loop)
fut = asyncio.Future(loop=loop)
@asyncio.coroutine
def cb(app):
yield from asyncio.sleep(0.001, loop=loop)
fut.set_result(123)
app.register_on_finish(cb)
yield from app.finish()
assert fut.done()
assert 123 == fut.result()
@pytest.mark.run_loop
def test_app_error_in_finish_callbacks(loop):
app = web.Application(loop=loop)
err = RuntimeError('bad call')
app.register_on_finish(mock.Mock(side_effect=err))
handler = mock.Mock()
loop.set_exception_handler(handler)
yield from app.finish()
exc_info = {'exception': err,
'application': app,
'message': 'Error in finish callback'}
handler.assert_called_once_with(loop, exc_info)
def test_non_default_router(loop):
router = web.UrlDispatcher()
app = web.Application(loop=loop, router=router)
assert router is app.router
def test_logging(self):
logger = mock.Mock()
app = web.Application(loop=self.loop)
app.logger = logger
self.assertIs(app.logger, logger)
|
Convert web.Application tests to pytest styleimport asyncio
import pytest
from aiohttp import web, log
from unittest import mock
def test_app_ctor(loop):
app = web.Application(loop=loop)
assert loop is app.loop
assert app.logger is log.web_logger
def test_app_call(loop):
app = web.Application(loop=loop)
assert app is app()
def test_app_default_loop(loop):
asyncio.set_event_loop(loop)
app = web.Application()
assert loop is app.loop
@pytest.mark.run_loop
def test_app_register_on_finish(loop):
app = web.Application(loop=loop)
cb1 = mock.Mock()
cb2 = mock.Mock()
app.register_on_finish(cb1, 1, b=2)
app.register_on_finish(cb2, 2, c=3)
yield from app.finish()
cb1.assert_called_once_with(app, 1, b=2)
cb2.assert_called_once_with(app, 2, c=3)
@pytest.mark.run_loop
def test_app_register_coro(loop):
app = web.Application(loop=loop)
fut = asyncio.Future(loop=loop)
@asyncio.coroutine
def cb(app):
yield from asyncio.sleep(0.001, loop=loop)
fut.set_result(123)
app.register_on_finish(cb)
yield from app.finish()
assert fut.done()
assert 123 == fut.result()
@pytest.mark.run_loop
def test_app_error_in_finish_callbacks(loop):
app = web.Application(loop=loop)
err = RuntimeError('bad call')
app.register_on_finish(mock.Mock(side_effect=err))
handler = mock.Mock()
loop.set_exception_handler(handler)
yield from app.finish()
exc_info = {'exception': err,
'application': app,
'message': 'Error in finish callback'}
handler.assert_called_once_with(loop, exc_info)
def test_non_default_router(loop):
router = web.UrlDispatcher()
app = web.Application(loop=loop, router=router)
assert router is app.router
def test_logging(self):
logger = mock.Mock()
app = web.Application(loop=self.loop)
app.logger = logger
self.assertIs(app.logger, logger)
|
<commit_before><commit_msg>Convert web.Application tests to pytest style<commit_after>import asyncio
import pytest
from aiohttp import web, log
from unittest import mock
def test_app_ctor(loop):
app = web.Application(loop=loop)
assert loop is app.loop
assert app.logger is log.web_logger
def test_app_call(loop):
app = web.Application(loop=loop)
assert app is app()
def test_app_default_loop(loop):
asyncio.set_event_loop(loop)
app = web.Application()
assert loop is app.loop
@pytest.mark.run_loop
def test_app_register_on_finish(loop):
app = web.Application(loop=loop)
cb1 = mock.Mock()
cb2 = mock.Mock()
app.register_on_finish(cb1, 1, b=2)
app.register_on_finish(cb2, 2, c=3)
yield from app.finish()
cb1.assert_called_once_with(app, 1, b=2)
cb2.assert_called_once_with(app, 2, c=3)
@pytest.mark.run_loop
def test_app_register_coro(loop):
app = web.Application(loop=loop)
fut = asyncio.Future(loop=loop)
@asyncio.coroutine
def cb(app):
yield from asyncio.sleep(0.001, loop=loop)
fut.set_result(123)
app.register_on_finish(cb)
yield from app.finish()
assert fut.done()
assert 123 == fut.result()
@pytest.mark.run_loop
def test_app_error_in_finish_callbacks(loop):
app = web.Application(loop=loop)
err = RuntimeError('bad call')
app.register_on_finish(mock.Mock(side_effect=err))
handler = mock.Mock()
loop.set_exception_handler(handler)
yield from app.finish()
exc_info = {'exception': err,
'application': app,
'message': 'Error in finish callback'}
handler.assert_called_once_with(loop, exc_info)
def test_non_default_router(loop):
router = web.UrlDispatcher()
app = web.Application(loop=loop, router=router)
assert router is app.router
def test_logging(self):
logger = mock.Mock()
app = web.Application(loop=self.loop)
app.logger = logger
self.assertIs(app.logger, logger)
|
|
8782bf61d97000a9267929ee54a158a78e41372c
|
examples/plt2xyz.py
|
examples/plt2xyz.py
|
#!/usr/bin/env python
"""
usage: plt2xyz.py PLTFILE > outfile.xyz
Dumps an XYZ point cloud from a compiled Compass plot file, with the
assumption that all "hidden" shots are splays which represent the cave's
walls.
If the project is tied to realworld UTM coordinates, then X, Y, and Z will
be in meters. If no UTM zone is specified, then coordinates are exactly as
stored in the .PLT file (feet relative to the zero datum).
"""
from __future__ import print_function
from davies.compass.plt import CompassPltParser
FT_TO_M = 0.3048 # convert feet to meters
def plt2xyz(fname):
"""Convert a Compass plot file to XYZ pointcloud"""
parser = CompassPltParser(fname)
plt = parser.parse()
for segment in plt:
for command in segment:
if command.cmd == 'd':
if plt.utm_zone:
x, y, z = command.x * FT_TO_M, command.y * FT_TO_M, command.z * FT_TO_M
else:
x, y, z = command.x, command.y, command.z
print('%.3f\t%.3f\t%.3f' % (x, y, z))
def main():
import sys
import logging
logging.basicConfig()
if len(sys.argv) < 2:
print('usage: %s PLTFILE' % sys.argv[1], file=sys.stderr)
sys.exit(2)
plt2xyz(sys.argv[1])
if __name__ == '__main__':
main()
|
Add example script which converts a Compass .PLT plot file with splay shots to an XYZ pointcloud.
|
Add example script which converts a Compass .PLT plot file with splay shots to an XYZ pointcloud.
|
Python
|
mit
|
riggsd/davies
|
Add example script which converts a Compass .PLT plot file with splay shots to an XYZ pointcloud.
|
#!/usr/bin/env python
"""
usage: plt2xyz.py PLTFILE > outfile.xyz
Dumps an XYZ point cloud from a compiled Compass plot file, with the
assumption that all "hidden" shots are splays which represent the cave's
walls.
If the project is tied to realworld UTM coordinates, then X, Y, and Z will
be in meters. If no UTM zone is specified, then coordinates are exactly as
stored in the .PLT file (feet relative to the zero datum).
"""
from __future__ import print_function
from davies.compass.plt import CompassPltParser
FT_TO_M = 0.3048 # convert feet to meters
def plt2xyz(fname):
"""Convert a Compass plot file to XYZ pointcloud"""
parser = CompassPltParser(fname)
plt = parser.parse()
for segment in plt:
for command in segment:
if command.cmd == 'd':
if plt.utm_zone:
x, y, z = command.x * FT_TO_M, command.y * FT_TO_M, command.z * FT_TO_M
else:
x, y, z = command.x, command.y, command.z
print('%.3f\t%.3f\t%.3f' % (x, y, z))
def main():
import sys
import logging
logging.basicConfig()
if len(sys.argv) < 2:
print('usage: %s PLTFILE' % sys.argv[1], file=sys.stderr)
sys.exit(2)
plt2xyz(sys.argv[1])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example script which converts a Compass .PLT plot file with splay shots to an XYZ pointcloud.<commit_after>
|
#!/usr/bin/env python
"""
usage: plt2xyz.py PLTFILE > outfile.xyz
Dumps an XYZ point cloud from a compiled Compass plot file, with the
assumption that all "hidden" shots are splays which represent the cave's
walls.
If the project is tied to realworld UTM coordinates, then X, Y, and Z will
be in meters. If no UTM zone is specified, then coordinates are exactly as
stored in the .PLT file (feet relative to the zero datum).
"""
from __future__ import print_function
from davies.compass.plt import CompassPltParser
FT_TO_M = 0.3048 # convert feet to meters
def plt2xyz(fname):
"""Convert a Compass plot file to XYZ pointcloud"""
parser = CompassPltParser(fname)
plt = parser.parse()
for segment in plt:
for command in segment:
if command.cmd == 'd':
if plt.utm_zone:
x, y, z = command.x * FT_TO_M, command.y * FT_TO_M, command.z * FT_TO_M
else:
x, y, z = command.x, command.y, command.z
print('%.3f\t%.3f\t%.3f' % (x, y, z))
def main():
import sys
import logging
logging.basicConfig()
if len(sys.argv) < 2:
print('usage: %s PLTFILE' % sys.argv[1], file=sys.stderr)
sys.exit(2)
plt2xyz(sys.argv[1])
if __name__ == '__main__':
main()
|
Add example script which converts a Compass .PLT plot file with splay shots to an XYZ pointcloud.#!/usr/bin/env python
"""
usage: plt2xyz.py PLTFILE > outfile.xyz
Dumps an XYZ point cloud from a compiled Compass plot file, with the
assumption that all "hidden" shots are splays which represent the cave's
walls.
If the project is tied to realworld UTM coordinates, then X, Y, and Z will
be in meters. If no UTM zone is specified, then coordinates are exactly as
stored in the .PLT file (feet relative to the zero datum).
"""
from __future__ import print_function
from davies.compass.plt import CompassPltParser
FT_TO_M = 0.3048 # convert feet to meters
def plt2xyz(fname):
"""Convert a Compass plot file to XYZ pointcloud"""
parser = CompassPltParser(fname)
plt = parser.parse()
for segment in plt:
for command in segment:
if command.cmd == 'd':
if plt.utm_zone:
x, y, z = command.x * FT_TO_M, command.y * FT_TO_M, command.z * FT_TO_M
else:
x, y, z = command.x, command.y, command.z
print('%.3f\t%.3f\t%.3f' % (x, y, z))
def main():
import sys
import logging
logging.basicConfig()
if len(sys.argv) < 2:
print('usage: %s PLTFILE' % sys.argv[1], file=sys.stderr)
sys.exit(2)
plt2xyz(sys.argv[1])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example script which converts a Compass .PLT plot file with splay shots to an XYZ pointcloud.<commit_after>#!/usr/bin/env python
"""
usage: plt2xyz.py PLTFILE > outfile.xyz
Dumps an XYZ point cloud from a compiled Compass plot file, with the
assumption that all "hidden" shots are splays which represent the cave's
walls.
If the project is tied to realworld UTM coordinates, then X, Y, and Z will
be in meters. If no UTM zone is specified, then coordinates are exactly as
stored in the .PLT file (feet relative to the zero datum).
"""
from __future__ import print_function
from davies.compass.plt import CompassPltParser
FT_TO_M = 0.3048 # convert feet to meters
def plt2xyz(fname):
"""Convert a Compass plot file to XYZ pointcloud"""
parser = CompassPltParser(fname)
plt = parser.parse()
for segment in plt:
for command in segment:
if command.cmd == 'd':
if plt.utm_zone:
x, y, z = command.x * FT_TO_M, command.y * FT_TO_M, command.z * FT_TO_M
else:
x, y, z = command.x, command.y, command.z
print('%.3f\t%.3f\t%.3f' % (x, y, z))
def main():
import sys
import logging
logging.basicConfig()
if len(sys.argv) < 2:
print('usage: %s PLTFILE' % sys.argv[1], file=sys.stderr)
sys.exit(2)
plt2xyz(sys.argv[1])
if __name__ == '__main__':
main()
|
|
e1cd24cf2f7133a6ad766d580cd728b4997b141d
|
COURSE/ML/lab2/lab2_vedio.py
|
COURSE/ML/lab2/lab2_vedio.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import pandas as pd
from sklearn.preprocessing import normalize
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from scipy.spatial.distance import cosine as Cos
dataset_url = 'http://mlr.cs.umass.edu/ml/machine-learning-databases/' + \
'wine-quality/winequality-white.csv'
data = pd.read_csv(dataset_url, sep=';')
feature = normalize(data.iloc[:, :-2])
target = data.iloc[:, -1]
start = time.time()
nbr = KNN(n_neighbors=11, weights='distance', \
algorithm='brute', metric='manhattan')
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))
start = time.time()
kf = KFold(n_splits=12)
total_rate = 0
for train_index, test_index in kf.split(data):
train_feature, test_feature = feature[train_index], feature[test_index]
train_target, test_target = target[train_index], target[test_index]
nbr = KNN(n_neighbors=11, weights='distance', \
algorithm='brute', metric='manhattan')
nbr.fit(train_feature, train_target)
pred = nbr.predict(test_feature)
mx = confusion_matrix(test_target, pred)
total_rate += mx.trace()/mx.sum()
print(total_rate/12)
print("===== used %s seconds =====" % (time.time()-start))
# Cos re
def cosDist(a, b):
return Cos(a, b)
start = time.time()
nbr = KNN(n_neighbors=11, algorithm='brute', metric=cosDist)
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))
|
Add ML lab2 vedio version
|
Add ML lab2 vedio version
|
Python
|
mit
|
calee0219/Programming,calee0219/Programming,calee0219/Programming,calee0219/Programming,calee0219/Programming,calee0219/Programming,calee0219/Programming
|
Add ML lab2 vedio version
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import pandas as pd
from sklearn.preprocessing import normalize
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from scipy.spatial.distance import cosine as Cos
dataset_url = 'http://mlr.cs.umass.edu/ml/machine-learning-databases/' + \
'wine-quality/winequality-white.csv'
data = pd.read_csv(dataset_url, sep=';')
feature = normalize(data.iloc[:, :-2])
target = data.iloc[:, -1]
start = time.time()
nbr = KNN(n_neighbors=11, weights='distance', \
algorithm='brute', metric='manhattan')
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))
start = time.time()
kf = KFold(n_splits=12)
total_rate = 0
for train_index, test_index in kf.split(data):
train_feature, test_feature = feature[train_index], feature[test_index]
train_target, test_target = target[train_index], target[test_index]
nbr = KNN(n_neighbors=11, weights='distance', \
algorithm='brute', metric='manhattan')
nbr.fit(train_feature, train_target)
pred = nbr.predict(test_feature)
mx = confusion_matrix(test_target, pred)
total_rate += mx.trace()/mx.sum()
print(total_rate/12)
print("===== used %s seconds =====" % (time.time()-start))
# Cos re
def cosDist(a, b):
return Cos(a, b)
start = time.time()
nbr = KNN(n_neighbors=11, algorithm='brute', metric=cosDist)
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))
|
<commit_before><commit_msg>Add ML lab2 vedio version<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import pandas as pd
from sklearn.preprocessing import normalize
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from scipy.spatial.distance import cosine as Cos
dataset_url = 'http://mlr.cs.umass.edu/ml/machine-learning-databases/' + \
'wine-quality/winequality-white.csv'
data = pd.read_csv(dataset_url, sep=';')
feature = normalize(data.iloc[:, :-2])
target = data.iloc[:, -1]
start = time.time()
nbr = KNN(n_neighbors=11, weights='distance', \
algorithm='brute', metric='manhattan')
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))
start = time.time()
kf = KFold(n_splits=12)
total_rate = 0
for train_index, test_index in kf.split(data):
train_feature, test_feature = feature[train_index], feature[test_index]
train_target, test_target = target[train_index], target[test_index]
nbr = KNN(n_neighbors=11, weights='distance', \
algorithm='brute', metric='manhattan')
nbr.fit(train_feature, train_target)
pred = nbr.predict(test_feature)
mx = confusion_matrix(test_target, pred)
total_rate += mx.trace()/mx.sum()
print(total_rate/12)
print("===== used %s seconds =====" % (time.time()-start))
# Cos re
def cosDist(a, b):
return Cos(a, b)
start = time.time()
nbr = KNN(n_neighbors=11, algorithm='brute', metric=cosDist)
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))
|
Add ML lab2 vedio version#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import pandas as pd
from sklearn.preprocessing import normalize
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from scipy.spatial.distance import cosine as Cos
dataset_url = 'http://mlr.cs.umass.edu/ml/machine-learning-databases/' + \
'wine-quality/winequality-white.csv'
data = pd.read_csv(dataset_url, sep=';')
feature = normalize(data.iloc[:, :-2])
target = data.iloc[:, -1]
start = time.time()
nbr = KNN(n_neighbors=11, weights='distance', \
algorithm='brute', metric='manhattan')
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))
start = time.time()
kf = KFold(n_splits=12)
total_rate = 0
for train_index, test_index in kf.split(data):
train_feature, test_feature = feature[train_index], feature[test_index]
train_target, test_target = target[train_index], target[test_index]
nbr = KNN(n_neighbors=11, weights='distance', \
algorithm='brute', metric='manhattan')
nbr.fit(train_feature, train_target)
pred = nbr.predict(test_feature)
mx = confusion_matrix(test_target, pred)
total_rate += mx.trace()/mx.sum()
print(total_rate/12)
print("===== used %s seconds =====" % (time.time()-start))
# Cos re
def cosDist(a, b):
return Cos(a, b)
start = time.time()
nbr = KNN(n_neighbors=11, algorithm='brute', metric=cosDist)
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))
|
<commit_before><commit_msg>Add ML lab2 vedio version<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import pandas as pd
from sklearn.preprocessing import normalize
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from scipy.spatial.distance import cosine as Cos
dataset_url = 'http://mlr.cs.umass.edu/ml/machine-learning-databases/' + \
'wine-quality/winequality-white.csv'
data = pd.read_csv(dataset_url, sep=';')
feature = normalize(data.iloc[:, :-2])
target = data.iloc[:, -1]
start = time.time()
nbr = KNN(n_neighbors=11, weights='distance', \
algorithm='brute', metric='manhattan')
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))
start = time.time()
kf = KFold(n_splits=12)
total_rate = 0
for train_index, test_index in kf.split(data):
train_feature, test_feature = feature[train_index], feature[test_index]
train_target, test_target = target[train_index], target[test_index]
nbr = KNN(n_neighbors=11, weights='distance', \
algorithm='brute', metric='manhattan')
nbr.fit(train_feature, train_target)
pred = nbr.predict(test_feature)
mx = confusion_matrix(test_target, pred)
total_rate += mx.trace()/mx.sum()
print(total_rate/12)
print("===== used %s seconds =====" % (time.time()-start))
# Cos re
def cosDist(a, b):
return Cos(a, b)
start = time.time()
nbr = KNN(n_neighbors=11, algorithm='brute', metric=cosDist)
nbr.fit(feature, target)
pred = nbr.predict(feature)
mx = confusion_matrix(target, pred)
print(mx.trace()/mx.sum())
print("===== used %s seconds =====" % (time.time()-start))
|
|
e81c421e1ef2afe896ec035e8af137415eeeab46
|
geoPrint.py
|
geoPrint.py
|
#!/usr/bin/env python
import pygeoip, dpkt, socket, optparse
gi = pygeoip.GeoIP('/opt/GeoIP/Geo.dat')
tgt = '173.255.226.98' #Could be added as an argument
def banner():
print "#### IP to Physical Address Map p131 ####"
pirnt ""
def printRecort(tgt):
#By itself can print the Lon/Lat of an IP address
rec = gi-record_by_name(tgt)
city = rec['city']
region = rec['region_name']
country = rec['country_name']
long = rec['longitude']
lat = rec['latitude']
print '[*] Target: '+tgt+' Geo-located. '
print '[+] '+str(city)+', '+str(region)+', 'str(country)
print '[+] Latitude: '+str(lat)+', Longitude: '+str(long)
def retGeoStr(ip):
try:
rec = gi.gi-record_by_name(ip)
city = rec['city']
country = rec['country']
if city != '':
geoLoc = city+ ', '+country
else:
geoLoc = country
return geoLoc
except Exception, e:
return 'Unregistered'
def printPcap(pcap):
#By itself can print the src & dest of pcap. Include main() that's commented out in this function
for (ts, buf) in pcap:
try:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
src = cocket.inet_ntoa(ip.src)
dst = socket.inet_ntoa(ip.dst)
print '[+] Src: '+src+ ' --> Dst: '+dst
except:
pass
'''
def main():
f = open('geotest.pcap')
pcap = dpkt.pcap.Reader(f)
printPcap(pcap)
'''
def main():
parser = optparse.OptionParser('usage%prog '+'-p <pcap file>')
parser.add_option('-p', dest='pcapFile', type ='string', help='specify pcap file')
(options, args) = parser.parse_args()
if options.pcapFile == None:
print parser.usage
exit(0)
pcapFile = options.pcapFile
f=open(pcapFile)
pcap = dpkt.pcap.Reader(f)
printPcap(pcap)
if __name__ == '__main__':
main()
|
DEBUG AND TEST. This one had a lot of typos
|
DEBUG AND TEST. This one had a lot of typos
|
Python
|
mit
|
n1cfury/ViolentPython
|
DEBUG AND TEST. This one had a lot of typos
|
#!/usr/bin/env python
import pygeoip, dpkt, socket, optparse
gi = pygeoip.GeoIP('/opt/GeoIP/Geo.dat')
tgt = '173.255.226.98' #Could be added as an argument
def banner():
print "#### IP to Physical Address Map p131 ####"
pirnt ""
def printRecort(tgt):
#By itself can print the Lon/Lat of an IP address
rec = gi-record_by_name(tgt)
city = rec['city']
region = rec['region_name']
country = rec['country_name']
long = rec['longitude']
lat = rec['latitude']
print '[*] Target: '+tgt+' Geo-located. '
print '[+] '+str(city)+', '+str(region)+', 'str(country)
print '[+] Latitude: '+str(lat)+', Longitude: '+str(long)
def retGeoStr(ip):
try:
rec = gi.gi-record_by_name(ip)
city = rec['city']
country = rec['country']
if city != '':
geoLoc = city+ ', '+country
else:
geoLoc = country
return geoLoc
except Exception, e:
return 'Unregistered'
def printPcap(pcap):
#By itself can print the src & dest of pcap. Include main() that's commented out in this function
for (ts, buf) in pcap:
try:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
src = cocket.inet_ntoa(ip.src)
dst = socket.inet_ntoa(ip.dst)
print '[+] Src: '+src+ ' --> Dst: '+dst
except:
pass
'''
def main():
f = open('geotest.pcap')
pcap = dpkt.pcap.Reader(f)
printPcap(pcap)
'''
def main():
parser = optparse.OptionParser('usage%prog '+'-p <pcap file>')
parser.add_option('-p', dest='pcapFile', type ='string', help='specify pcap file')
(options, args) = parser.parse_args()
if options.pcapFile == None:
print parser.usage
exit(0)
pcapFile = options.pcapFile
f=open(pcapFile)
pcap = dpkt.pcap.Reader(f)
printPcap(pcap)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>DEBUG AND TEST. This one had a lot of typos<commit_after>
|
#!/usr/bin/env python
import pygeoip, dpkt, socket, optparse
gi = pygeoip.GeoIP('/opt/GeoIP/Geo.dat')
tgt = '173.255.226.98' #Could be added as an argument
def banner():
print "#### IP to Physical Address Map p131 ####"
pirnt ""
def printRecort(tgt):
#By itself can print the Lon/Lat of an IP address
rec = gi-record_by_name(tgt)
city = rec['city']
region = rec['region_name']
country = rec['country_name']
long = rec['longitude']
lat = rec['latitude']
print '[*] Target: '+tgt+' Geo-located. '
print '[+] '+str(city)+', '+str(region)+', 'str(country)
print '[+] Latitude: '+str(lat)+', Longitude: '+str(long)
def retGeoStr(ip):
try:
rec = gi.gi-record_by_name(ip)
city = rec['city']
country = rec['country']
if city != '':
geoLoc = city+ ', '+country
else:
geoLoc = country
return geoLoc
except Exception, e:
return 'Unregistered'
def printPcap(pcap):
#By itself can print the src & dest of pcap. Include main() that's commented out in this function
for (ts, buf) in pcap:
try:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
src = cocket.inet_ntoa(ip.src)
dst = socket.inet_ntoa(ip.dst)
print '[+] Src: '+src+ ' --> Dst: '+dst
except:
pass
'''
def main():
f = open('geotest.pcap')
pcap = dpkt.pcap.Reader(f)
printPcap(pcap)
'''
def main():
parser = optparse.OptionParser('usage%prog '+'-p <pcap file>')
parser.add_option('-p', dest='pcapFile', type ='string', help='specify pcap file')
(options, args) = parser.parse_args()
if options.pcapFile == None:
print parser.usage
exit(0)
pcapFile = options.pcapFile
f=open(pcapFile)
pcap = dpkt.pcap.Reader(f)
printPcap(pcap)
if __name__ == '__main__':
main()
|
DEBUG AND TEST. This one had a lot of typos#!/usr/bin/env python
import pygeoip, dpkt, socket, optparse
gi = pygeoip.GeoIP('/opt/GeoIP/Geo.dat')
tgt = '173.255.226.98' #Could be added as an argument
def banner():
print "#### IP to Physical Address Map p131 ####"
pirnt ""
def printRecort(tgt):
#By itself can print the Lon/Lat of an IP address
rec = gi-record_by_name(tgt)
city = rec['city']
region = rec['region_name']
country = rec['country_name']
long = rec['longitude']
lat = rec['latitude']
print '[*] Target: '+tgt+' Geo-located. '
print '[+] '+str(city)+', '+str(region)+', 'str(country)
print '[+] Latitude: '+str(lat)+', Longitude: '+str(long)
def retGeoStr(ip):
try:
rec = gi.gi-record_by_name(ip)
city = rec['city']
country = rec['country']
if city != '':
geoLoc = city+ ', '+country
else:
geoLoc = country
return geoLoc
except Exception, e:
return 'Unregistered'
def printPcap(pcap):
#By itself can print the src & dest of pcap. Include main() that's commented out in this function
for (ts, buf) in pcap:
try:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
src = cocket.inet_ntoa(ip.src)
dst = socket.inet_ntoa(ip.dst)
print '[+] Src: '+src+ ' --> Dst: '+dst
except:
pass
'''
def main():
f = open('geotest.pcap')
pcap = dpkt.pcap.Reader(f)
printPcap(pcap)
'''
def main():
parser = optparse.OptionParser('usage%prog '+'-p <pcap file>')
parser.add_option('-p', dest='pcapFile', type ='string', help='specify pcap file')
(options, args) = parser.parse_args()
if options.pcapFile == None:
print parser.usage
exit(0)
pcapFile = options.pcapFile
f=open(pcapFile)
pcap = dpkt.pcap.Reader(f)
printPcap(pcap)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>DEBUG AND TEST. This one had a lot of typos<commit_after>#!/usr/bin/env python
import pygeoip, dpkt, socket, optparse
gi = pygeoip.GeoIP('/opt/GeoIP/Geo.dat')
tgt = '173.255.226.98' #Could be added as an argument
def banner():
print "#### IP to Physical Address Map p131 ####"
pirnt ""
def printRecort(tgt):
#By itself can print the Lon/Lat of an IP address
rec = gi-record_by_name(tgt)
city = rec['city']
region = rec['region_name']
country = rec['country_name']
long = rec['longitude']
lat = rec['latitude']
print '[*] Target: '+tgt+' Geo-located. '
print '[+] '+str(city)+', '+str(region)+', 'str(country)
print '[+] Latitude: '+str(lat)+', Longitude: '+str(long)
def retGeoStr(ip):
try:
rec = gi.gi-record_by_name(ip)
city = rec['city']
country = rec['country']
if city != '':
geoLoc = city+ ', '+country
else:
geoLoc = country
return geoLoc
except Exception, e:
return 'Unregistered'
def printPcap(pcap):
#By itself can print the src & dest of pcap. Include main() that's commented out in this function
for (ts, buf) in pcap:
try:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
src = cocket.inet_ntoa(ip.src)
dst = socket.inet_ntoa(ip.dst)
print '[+] Src: '+src+ ' --> Dst: '+dst
except:
pass
'''
def main():
f = open('geotest.pcap')
pcap = dpkt.pcap.Reader(f)
printPcap(pcap)
'''
def main():
parser = optparse.OptionParser('usage%prog '+'-p <pcap file>')
parser.add_option('-p', dest='pcapFile', type ='string', help='specify pcap file')
(options, args) = parser.parse_args()
if options.pcapFile == None:
print parser.usage
exit(0)
pcapFile = options.pcapFile
f=open(pcapFile)
pcap = dpkt.pcap.Reader(f)
printPcap(pcap)
if __name__ == '__main__':
main()
|
|
78120151788b95d01ccf1e0e919572287e598758
|
openforcefield/tests/test_io.py
|
openforcefield/tests/test_io.py
|
#!/usr/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Test classes and function in module openforcefield.typing.engines.smirnoff.io.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import ast
import pytest
from simtk import unit
from openforcefield.typing.engines.smirnoff.io import (
_ast_unit_eval,
_extract_attached_units,
)
#=============================================================================================
# QUANTITY PARSING UTILITIES
#=============================================================================================
@pytest.mark.parametrize('unit_string,expected_unit',[
('kilocalories_per_mole', unit.kilocalories_per_mole),
('kilocalories_per_mole/angstrom**2', unit.kilocalories_per_mole/unit.angstrom**2),
('joule/(mole * nanometer**2)', unit.joule/(unit.mole * unit.nanometer**2)),
('picosecond**(-1)', unit.picosecond**(-1)),
('300.0 * kelvin', 300*unit.kelvin),
('1 * kilojoule + 500 * joule', 1.5*unit.kilojoule),
('1 / meter', 1.0 / unit.meter)
])
def test_ast_unit_eval(unit_string, expected_unit):
"""Test that _ast_unit_eval() correctly parses string quantities."""
ast_root_node = ast.parse(unit_string, mode='eval').body
parsed_units = _ast_unit_eval(ast_root_node)
assert parsed_units == expected_unit
@pytest.mark.parametrize('attributes,expected',[
({'not_parsed': 'blabla'},
{}),
({'not_parsed': 'blabla', 'attr_unit': 'angstrom/femtosecond'},
{'attr': unit.angstrom/unit.femtosecond}),
({'not_parsed': 'blabla', 'attr1_unit': 'meter', 'attr2_unit': 'kilojoule_per_mole'},
{'attr1': unit.meter, 'attr2': unit.kilojoule_per_mole}),
])
def test_extract_attached_units(attributes, expected):
"""Test that _extract_attached_units() correctly parses the correct."""
assert _extract_attached_units(attributes) == expected
@pytest.mark.parametrize('attributes',[
{'attr_unit': '300.0 * kelvin'},
{'attr_unit': '1 / picosecond'}
])
def test_extract_attached_units_raises(attributes):
"""Test that _extract_attached_units() raises an error when a quantity is specified instead of a unit."""
with pytest.raises(ValueError, match='associated to a quantity rather than only units'):
_extract_attached_units(attributes)
|
Add tests quantity from string parsing
|
Add tests quantity from string parsing
|
Python
|
mit
|
openforcefield/openff-toolkit,open-forcefield-group/openforcefield,openforcefield/openff-toolkit,open-forcefield-group/openforcefield,open-forcefield-group/openforcefield
|
Add tests quantity from string parsing
|
#!/usr/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Test classes and function in module openforcefield.typing.engines.smirnoff.io.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import ast
import pytest
from simtk import unit
from openforcefield.typing.engines.smirnoff.io import (
_ast_unit_eval,
_extract_attached_units,
)
#=============================================================================================
# QUANTITY PARSING UTILITIES
#=============================================================================================
@pytest.mark.parametrize('unit_string,expected_unit',[
('kilocalories_per_mole', unit.kilocalories_per_mole),
('kilocalories_per_mole/angstrom**2', unit.kilocalories_per_mole/unit.angstrom**2),
('joule/(mole * nanometer**2)', unit.joule/(unit.mole * unit.nanometer**2)),
('picosecond**(-1)', unit.picosecond**(-1)),
('300.0 * kelvin', 300*unit.kelvin),
('1 * kilojoule + 500 * joule', 1.5*unit.kilojoule),
('1 / meter', 1.0 / unit.meter)
])
def test_ast_unit_eval(unit_string, expected_unit):
"""Test that _ast_unit_eval() correctly parses string quantities."""
ast_root_node = ast.parse(unit_string, mode='eval').body
parsed_units = _ast_unit_eval(ast_root_node)
assert parsed_units == expected_unit
@pytest.mark.parametrize('attributes,expected',[
({'not_parsed': 'blabla'},
{}),
({'not_parsed': 'blabla', 'attr_unit': 'angstrom/femtosecond'},
{'attr': unit.angstrom/unit.femtosecond}),
({'not_parsed': 'blabla', 'attr1_unit': 'meter', 'attr2_unit': 'kilojoule_per_mole'},
{'attr1': unit.meter, 'attr2': unit.kilojoule_per_mole}),
])
def test_extract_attached_units(attributes, expected):
"""Test that _extract_attached_units() correctly parses the correct."""
assert _extract_attached_units(attributes) == expected
@pytest.mark.parametrize('attributes',[
{'attr_unit': '300.0 * kelvin'},
{'attr_unit': '1 / picosecond'}
])
def test_extract_attached_units_raises(attributes):
"""Test that _extract_attached_units() raises an error when a quantity is specified instead of a unit."""
with pytest.raises(ValueError, match='associated to a quantity rather than only units'):
_extract_attached_units(attributes)
|
<commit_before><commit_msg>Add tests quantity from string parsing<commit_after>
|
#!/usr/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Test classes and function in module openforcefield.typing.engines.smirnoff.io.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import ast
import pytest
from simtk import unit
from openforcefield.typing.engines.smirnoff.io import (
_ast_unit_eval,
_extract_attached_units,
)
#=============================================================================================
# QUANTITY PARSING UTILITIES
#=============================================================================================
@pytest.mark.parametrize('unit_string,expected_unit',[
('kilocalories_per_mole', unit.kilocalories_per_mole),
('kilocalories_per_mole/angstrom**2', unit.kilocalories_per_mole/unit.angstrom**2),
('joule/(mole * nanometer**2)', unit.joule/(unit.mole * unit.nanometer**2)),
('picosecond**(-1)', unit.picosecond**(-1)),
('300.0 * kelvin', 300*unit.kelvin),
('1 * kilojoule + 500 * joule', 1.5*unit.kilojoule),
('1 / meter', 1.0 / unit.meter)
])
def test_ast_unit_eval(unit_string, expected_unit):
"""Test that _ast_unit_eval() correctly parses string quantities."""
ast_root_node = ast.parse(unit_string, mode='eval').body
parsed_units = _ast_unit_eval(ast_root_node)
assert parsed_units == expected_unit
@pytest.mark.parametrize('attributes,expected',[
({'not_parsed': 'blabla'},
{}),
({'not_parsed': 'blabla', 'attr_unit': 'angstrom/femtosecond'},
{'attr': unit.angstrom/unit.femtosecond}),
({'not_parsed': 'blabla', 'attr1_unit': 'meter', 'attr2_unit': 'kilojoule_per_mole'},
{'attr1': unit.meter, 'attr2': unit.kilojoule_per_mole}),
])
def test_extract_attached_units(attributes, expected):
"""Test that _extract_attached_units() correctly parses the correct."""
assert _extract_attached_units(attributes) == expected
@pytest.mark.parametrize('attributes',[
{'attr_unit': '300.0 * kelvin'},
{'attr_unit': '1 / picosecond'}
])
def test_extract_attached_units_raises(attributes):
"""Test that _extract_attached_units() raises an error when a quantity is specified instead of a unit."""
with pytest.raises(ValueError, match='associated to a quantity rather than only units'):
_extract_attached_units(attributes)
|
Add tests quantity from string parsing#!/usr/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Test classes and function in module openforcefield.typing.engines.smirnoff.io.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import ast
import pytest
from simtk import unit
from openforcefield.typing.engines.smirnoff.io import (
_ast_unit_eval,
_extract_attached_units,
)
#=============================================================================================
# QUANTITY PARSING UTILITIES
#=============================================================================================
@pytest.mark.parametrize('unit_string,expected_unit',[
('kilocalories_per_mole', unit.kilocalories_per_mole),
('kilocalories_per_mole/angstrom**2', unit.kilocalories_per_mole/unit.angstrom**2),
('joule/(mole * nanometer**2)', unit.joule/(unit.mole * unit.nanometer**2)),
('picosecond**(-1)', unit.picosecond**(-1)),
('300.0 * kelvin', 300*unit.kelvin),
('1 * kilojoule + 500 * joule', 1.5*unit.kilojoule),
('1 / meter', 1.0 / unit.meter)
])
def test_ast_unit_eval(unit_string, expected_unit):
"""Test that _ast_unit_eval() correctly parses string quantities."""
ast_root_node = ast.parse(unit_string, mode='eval').body
parsed_units = _ast_unit_eval(ast_root_node)
assert parsed_units == expected_unit
@pytest.mark.parametrize('attributes,expected',[
({'not_parsed': 'blabla'},
{}),
({'not_parsed': 'blabla', 'attr_unit': 'angstrom/femtosecond'},
{'attr': unit.angstrom/unit.femtosecond}),
({'not_parsed': 'blabla', 'attr1_unit': 'meter', 'attr2_unit': 'kilojoule_per_mole'},
{'attr1': unit.meter, 'attr2': unit.kilojoule_per_mole}),
])
def test_extract_attached_units(attributes, expected):
"""Test that _extract_attached_units() correctly parses the correct."""
assert _extract_attached_units(attributes) == expected
@pytest.mark.parametrize('attributes',[
{'attr_unit': '300.0 * kelvin'},
{'attr_unit': '1 / picosecond'}
])
def test_extract_attached_units_raises(attributes):
"""Test that _extract_attached_units() raises an error when a quantity is specified instead of a unit."""
with pytest.raises(ValueError, match='associated to a quantity rather than only units'):
_extract_attached_units(attributes)
|
<commit_before><commit_msg>Add tests quantity from string parsing<commit_after>#!/usr/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Test classes and function in module openforcefield.typing.engines.smirnoff.io.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import ast
import pytest
from simtk import unit
from openforcefield.typing.engines.smirnoff.io import (
_ast_unit_eval,
_extract_attached_units,
)
#=============================================================================================
# QUANTITY PARSING UTILITIES
#=============================================================================================
@pytest.mark.parametrize('unit_string,expected_unit',[
('kilocalories_per_mole', unit.kilocalories_per_mole),
('kilocalories_per_mole/angstrom**2', unit.kilocalories_per_mole/unit.angstrom**2),
('joule/(mole * nanometer**2)', unit.joule/(unit.mole * unit.nanometer**2)),
('picosecond**(-1)', unit.picosecond**(-1)),
('300.0 * kelvin', 300*unit.kelvin),
('1 * kilojoule + 500 * joule', 1.5*unit.kilojoule),
('1 / meter', 1.0 / unit.meter)
])
def test_ast_unit_eval(unit_string, expected_unit):
"""Test that _ast_unit_eval() correctly parses string quantities."""
ast_root_node = ast.parse(unit_string, mode='eval').body
parsed_units = _ast_unit_eval(ast_root_node)
assert parsed_units == expected_unit
@pytest.mark.parametrize('attributes,expected',[
({'not_parsed': 'blabla'},
{}),
({'not_parsed': 'blabla', 'attr_unit': 'angstrom/femtosecond'},
{'attr': unit.angstrom/unit.femtosecond}),
({'not_parsed': 'blabla', 'attr1_unit': 'meter', 'attr2_unit': 'kilojoule_per_mole'},
{'attr1': unit.meter, 'attr2': unit.kilojoule_per_mole}),
])
def test_extract_attached_units(attributes, expected):
"""Test that _extract_attached_units() correctly parses the correct."""
assert _extract_attached_units(attributes) == expected
@pytest.mark.parametrize('attributes',[
{'attr_unit': '300.0 * kelvin'},
{'attr_unit': '1 / picosecond'}
])
def test_extract_attached_units_raises(attributes):
"""Test that _extract_attached_units() raises an error when a quantity is specified instead of a unit."""
with pytest.raises(ValueError, match='associated to a quantity rather than only units'):
_extract_attached_units(attributes)
|
|
138c6263a3b3a3c24f5fe6b4f300542c74228448
|
src/btc_inference_cae.py
|
src/btc_inference_cae.py
|
import os
import numpy as np
import tensorflow as tf
from btc_settings import *
from btc_train import BTCTrain
import matplotlib.pyplot as plt
from btc_cae_parameters import get_parameters
class BTCInferenceCAE(BTCTrain):
def __init__(self, paras, input_path, model_path):
super().__init__(paras)
self.model_path = model_path
self.input = np.load(input_path)
self.network = self.models.autoencoder
self._inference()
return
def _compare(self, xr):
plt.figure(num="compare")
for i in range(4):
plt.subplot(2, 4, 2 * i + 1)
plt.title("original " + str(i))
plt.axis("off")
plt.imshow(self.input[..., i], cmap="gray")
plt.subplot(2, 4, 2 * i + 2)
plt.title("recontruction " + str(i))
plt.axis("off")
plt.imshow(xr[..., i], cmap="gray")
plt.show()
return
def _inference(self):
x = tf.placeholder(tf.float32, [1] + self.patch_shape)
is_training = tf.placeholder_with_default(False, [])
_, r = self.network(x, is_training)
loader = tf.train.Saver()
sess = tf.InteractiveSession()
loader.restore(sess, self.model_path)
xr = sess.run([r], feed_dict={x: np.reshape(self.input, [1] + self.patch_shape)})
xr = np.reshape(np.array(xr), self.patch_shape)
self._compare(xr)
return
if __name__ == "__main__":
parameters = get_parameters("cae", "slice", "kl")
input_path = "/home/user4/btc/data/Slices/TCGA-CS-4944/0_1.npy"
model_path = "/home/user4/btc/models/cae_2D_pool_kl/last/model"
BTCInferenceCAE(parameters, input_path, model_path)
|
Add script to do inferencer of autoencoder model
|
Add script to do inferencer of autoencoder model
|
Python
|
mit
|
quqixun/BrainTumorClassification,quqixun/BrainTumorClassification
|
Add script to do inferencer of autoencoder model
|
import os
import numpy as np
import tensorflow as tf
from btc_settings import *
from btc_train import BTCTrain
import matplotlib.pyplot as plt
from btc_cae_parameters import get_parameters
class BTCInferenceCAE(BTCTrain):
def __init__(self, paras, input_path, model_path):
super().__init__(paras)
self.model_path = model_path
self.input = np.load(input_path)
self.network = self.models.autoencoder
self._inference()
return
def _compare(self, xr):
plt.figure(num="compare")
for i in range(4):
plt.subplot(2, 4, 2 * i + 1)
plt.title("original " + str(i))
plt.axis("off")
plt.imshow(self.input[..., i], cmap="gray")
plt.subplot(2, 4, 2 * i + 2)
plt.title("recontruction " + str(i))
plt.axis("off")
plt.imshow(xr[..., i], cmap="gray")
plt.show()
return
def _inference(self):
x = tf.placeholder(tf.float32, [1] + self.patch_shape)
is_training = tf.placeholder_with_default(False, [])
_, r = self.network(x, is_training)
loader = tf.train.Saver()
sess = tf.InteractiveSession()
loader.restore(sess, self.model_path)
xr = sess.run([r], feed_dict={x: np.reshape(self.input, [1] + self.patch_shape)})
xr = np.reshape(np.array(xr), self.patch_shape)
self._compare(xr)
return
if __name__ == "__main__":
parameters = get_parameters("cae", "slice", "kl")
input_path = "/home/user4/btc/data/Slices/TCGA-CS-4944/0_1.npy"
model_path = "/home/user4/btc/models/cae_2D_pool_kl/last/model"
BTCInferenceCAE(parameters, input_path, model_path)
|
<commit_before><commit_msg>Add script to do inference of autoencoder model<commit_after>
|
import os
import numpy as np
import tensorflow as tf
from btc_settings import *
from btc_train import BTCTrain
import matplotlib.pyplot as plt
from btc_cae_parameters import get_parameters
class BTCInferenceCAE(BTCTrain):
def __init__(self, paras, input_path, model_path):
super().__init__(paras)
self.model_path = model_path
self.input = np.load(input_path)
self.network = self.models.autoencoder
self._inference()
return
def _compare(self, xr):
plt.figure(num="compare")
for i in range(4):
plt.subplot(2, 4, 2 * i + 1)
plt.title("original " + str(i))
plt.axis("off")
plt.imshow(self.input[..., i], cmap="gray")
plt.subplot(2, 4, 2 * i + 2)
plt.title("recontruction " + str(i))
plt.axis("off")
plt.imshow(xr[..., i], cmap="gray")
plt.show()
return
def _inference(self):
x = tf.placeholder(tf.float32, [1] + self.patch_shape)
is_training = tf.placeholder_with_default(False, [])
_, r = self.network(x, is_training)
loader = tf.train.Saver()
sess = tf.InteractiveSession()
loader.restore(sess, self.model_path)
xr = sess.run([r], feed_dict={x: np.reshape(self.input, [1] + self.patch_shape)})
xr = np.reshape(np.array(xr), self.patch_shape)
self._compare(xr)
return
if __name__ == "__main__":
parameters = get_parameters("cae", "slice", "kl")
input_path = "/home/user4/btc/data/Slices/TCGA-CS-4944/0_1.npy"
model_path = "/home/user4/btc/models/cae_2D_pool_kl/last/model"
BTCInferenceCAE(parameters, input_path, model_path)
|
Add script to do inference of autoencoder modelimport os
import numpy as np
import tensorflow as tf
from btc_settings import *
from btc_train import BTCTrain
import matplotlib.pyplot as plt
from btc_cae_parameters import get_parameters
class BTCInferenceCAE(BTCTrain):
def __init__(self, paras, input_path, model_path):
super().__init__(paras)
self.model_path = model_path
self.input = np.load(input_path)
self.network = self.models.autoencoder
self._inference()
return
def _compare(self, xr):
plt.figure(num="compare")
for i in range(4):
plt.subplot(2, 4, 2 * i + 1)
plt.title("original " + str(i))
plt.axis("off")
plt.imshow(self.input[..., i], cmap="gray")
plt.subplot(2, 4, 2 * i + 2)
plt.title("recontruction " + str(i))
plt.axis("off")
plt.imshow(xr[..., i], cmap="gray")
plt.show()
return
def _inference(self):
x = tf.placeholder(tf.float32, [1] + self.patch_shape)
is_training = tf.placeholder_with_default(False, [])
_, r = self.network(x, is_training)
loader = tf.train.Saver()
sess = tf.InteractiveSession()
loader.restore(sess, self.model_path)
xr = sess.run([r], feed_dict={x: np.reshape(self.input, [1] + self.patch_shape)})
xr = np.reshape(np.array(xr), self.patch_shape)
self._compare(xr)
return
if __name__ == "__main__":
parameters = get_parameters("cae", "slice", "kl")
input_path = "/home/user4/btc/data/Slices/TCGA-CS-4944/0_1.npy"
model_path = "/home/user4/btc/models/cae_2D_pool_kl/last/model"
BTCInferenceCAE(parameters, input_path, model_path)
|
<commit_before><commit_msg>Add script to do inference of autoencoder model<commit_after>import os
import numpy as np
import tensorflow as tf
from btc_settings import *
from btc_train import BTCTrain
import matplotlib.pyplot as plt
from btc_cae_parameters import get_parameters
class BTCInferenceCAE(BTCTrain):
def __init__(self, paras, input_path, model_path):
super().__init__(paras)
self.model_path = model_path
self.input = np.load(input_path)
self.network = self.models.autoencoder
self._inference()
return
def _compare(self, xr):
plt.figure(num="compare")
for i in range(4):
plt.subplot(2, 4, 2 * i + 1)
plt.title("original " + str(i))
plt.axis("off")
plt.imshow(self.input[..., i], cmap="gray")
plt.subplot(2, 4, 2 * i + 2)
plt.title("recontruction " + str(i))
plt.axis("off")
plt.imshow(xr[..., i], cmap="gray")
plt.show()
return
def _inference(self):
x = tf.placeholder(tf.float32, [1] + self.patch_shape)
is_training = tf.placeholder_with_default(False, [])
_, r = self.network(x, is_training)
loader = tf.train.Saver()
sess = tf.InteractiveSession()
loader.restore(sess, self.model_path)
xr = sess.run([r], feed_dict={x: np.reshape(self.input, [1] + self.patch_shape)})
xr = np.reshape(np.array(xr), self.patch_shape)
self._compare(xr)
return
if __name__ == "__main__":
parameters = get_parameters("cae", "slice", "kl")
input_path = "/home/user4/btc/data/Slices/TCGA-CS-4944/0_1.npy"
model_path = "/home/user4/btc/models/cae_2D_pool_kl/last/model"
BTCInferenceCAE(parameters, input_path, model_path)
|
|
980598a458d738186abf0d702535a42f121d8c85
|
PatternCreate/pattern_create.py
|
PatternCreate/pattern_create.py
|
import sys, string, re
arguments = []
textString = []
program = True
number = ['a','a','a']
def get_pattern(stuff):
first = "abcdefghijklmnopqrstuvwxyz0123456789"
next = "bcdefghijklmnopqrstuvwxyz0123456789a"
table = string.maketrans(first, next)
textString.append("".join(number))
for run in range(stuff-1):
if number[2] == '9':
nextNumber = string.translate(number[1], table)
number[1] = nextNumber
if number[1] == '9' and number[2] == '9':
nextNumber = string.translate(number[0], table)
number[0] = nextNumber
nextNumber = string.translate(number[2], table)
number[2] = nextNumber
textString.append("".join(number))
return textString
while program:
for arg in sys.argv:
if len(sys.argv) != 3:
print "You must put two arguments:"
print "'make' for make pattern, and byte length"
print "or 'find' for find displacement, and byte pattern found."
program = False
break
else:
arguments.append(arg)
if program and arguments[1] == 'make':
bytes = int(round((int(arguments[2]) / 3) + 0.5))
strLen = int(arguments[2])
myFile = open('pattern.txt', 'w')
tempString = ("".join(get_pattern(bytes)))
if len(tempString) > strLen:
chop = len(tempString) - strLen
myFile.write(tempString[:-chop])
myFile.close()
print "Your pattern has been made."
print "See the 'pattern.txt' file in this directory."
program = False
elif program and arguments[1] == 'find':
bytes = arguments[2]
myFile = open('pattern.txt', 'r')
testString = myFile.read()
marker = [m.start(0) for m in re.finditer(bytes, testString)]
print 'The first character in your string is the {0}th number in the file'.format(marker[0] + 1)
program = False
elif program:
print "Your input was not understood."
print "You must put two arguments:"
print "'make' for make pattern, and byte length"
print "or 'find' for find byte offset, and byte pattern found."
program = False
|
Add Exploit Dev Pattern Create
|
Add Exploit Dev Pattern Create
|
Python
|
cc0-1.0
|
JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology,JohnTroony/Scriptology
|
Add Exploit Dev Pattern Create
|
import sys, string, re
arguments = []
textString = []
program = True
number = ['a','a','a']
def get_pattern(stuff):
first = "abcdefghijklmnopqrstuvwxyz0123456789"
next = "bcdefghijklmnopqrstuvwxyz0123456789a"
table = string.maketrans(first, next)
textString.append("".join(number))
for run in range(stuff-1):
if number[2] == '9':
nextNumber = string.translate(number[1], table)
number[1] = nextNumber
if number[1] == '9' and number[2] == '9':
nextNumber = string.translate(number[0], table)
number[0] = nextNumber
nextNumber = string.translate(number[2], table)
number[2] = nextNumber
textString.append("".join(number))
return textString
while program:
for arg in sys.argv:
if len(sys.argv) != 3:
print "You must put two arguments:"
print "'make' for make pattern, and byte length"
print "or 'find' for find displacement, and byte pattern found."
program = False
break
else:
arguments.append(arg)
if program and arguments[1] == 'make':
bytes = int(round((int(arguments[2]) / 3) + 0.5))
strLen = int(arguments[2])
myFile = open('pattern.txt', 'w')
tempString = ("".join(get_pattern(bytes)))
if len(tempString) > strLen:
chop = len(tempString) - strLen
myFile.write(tempString[:-chop])
myFile.close()
print "Your pattern has been made."
print "See the 'pattern.txt' file in this directory."
program = False
elif program and arguments[1] == 'find':
bytes = arguments[2]
myFile = open('pattern.txt', 'r')
testString = myFile.read()
marker = [m.start(0) for m in re.finditer(bytes, testString)]
print 'The first character in your string is the {0}th number in the file'.format(marker[0] + 1)
program = False
elif program:
print "Your input was not understood."
print "You must put two arguments:"
print "'make' for make pattern, and byte length"
print "or 'find' for find byte offset, and byte pattern found."
program = False
|
<commit_before><commit_msg>Add Exploit Dev Pattern Create<commit_after>
|
import sys, string, re
arguments = []
textString = []
program = True
number = ['a','a','a']
def get_pattern(stuff):
first = "abcdefghijklmnopqrstuvwxyz0123456789"
next = "bcdefghijklmnopqrstuvwxyz0123456789a"
table = string.maketrans(first, next)
textString.append("".join(number))
for run in range(stuff-1):
if number[2] == '9':
nextNumber = string.translate(number[1], table)
number[1] = nextNumber
if number[1] == '9' and number[2] == '9':
nextNumber = string.translate(number[0], table)
number[0] = nextNumber
nextNumber = string.translate(number[2], table)
number[2] = nextNumber
textString.append("".join(number))
return textString
while program:
for arg in sys.argv:
if len(sys.argv) != 3:
print "You must put two arguments:"
print "'make' for make pattern, and byte length"
print "or 'find' for find displacement, and byte pattern found."
program = False
break
else:
arguments.append(arg)
if program and arguments[1] == 'make':
bytes = int(round((int(arguments[2]) / 3) + 0.5))
strLen = int(arguments[2])
myFile = open('pattern.txt', 'w')
tempString = ("".join(get_pattern(bytes)))
if len(tempString) > strLen:
chop = len(tempString) - strLen
myFile.write(tempString[:-chop])
myFile.close()
print "Your pattern has been made."
print "See the 'pattern.txt' file in this directory."
program = False
elif program and arguments[1] == 'find':
bytes = arguments[2]
myFile = open('pattern.txt', 'r')
testString = myFile.read()
marker = [m.start(0) for m in re.finditer(bytes, testString)]
print 'The first character in your string is the {0}th number in the file'.format(marker[0] + 1)
program = False
elif program:
print "Your input was not understood."
print "You must put two arguments:"
print "'make' for make pattern, and byte length"
print "or 'find' for find byte offset, and byte pattern found."
program = False
|
Add Exploit Dev Pattern Createimport sys, string, re
arguments = []
textString = []
program = True
number = ['a','a','a']
def get_pattern(stuff):
first = "abcdefghijklmnopqrstuvwxyz0123456789"
next = "bcdefghijklmnopqrstuvwxyz0123456789a"
table = string.maketrans(first, next)
textString.append("".join(number))
for run in range(stuff-1):
if number[2] == '9':
nextNumber = string.translate(number[1], table)
number[1] = nextNumber
if number[1] == '9' and number[2] == '9':
nextNumber = string.translate(number[0], table)
number[0] = nextNumber
nextNumber = string.translate(number[2], table)
number[2] = nextNumber
textString.append("".join(number))
return textString
while program:
for arg in sys.argv:
if len(sys.argv) != 3:
print "You must put two arguments:"
print "'make' for make pattern, and byte length"
print "or 'find' for find displacement, and byte pattern found."
program = False
break
else:
arguments.append(arg)
if program and arguments[1] == 'make':
bytes = int(round((int(arguments[2]) / 3) + 0.5))
strLen = int(arguments[2])
myFile = open('pattern.txt', 'w')
tempString = ("".join(get_pattern(bytes)))
if len(tempString) > strLen:
chop = len(tempString) - strLen
myFile.write(tempString[:-chop])
myFile.close()
print "Your pattern has been made."
print "See the 'pattern.txt' file in this directory."
program = False
elif program and arguments[1] == 'find':
bytes = arguments[2]
myFile = open('pattern.txt', 'r')
testString = myFile.read()
marker = [m.start(0) for m in re.finditer(bytes, testString)]
print 'The first character in your string is the {0}th number in the file'.format(marker[0] + 1)
program = False
elif program:
print "Your input was not understood."
print "You must put two arguments:"
print "'make' for make pattern, and byte length"
print "or 'find' for find byte offset, and byte pattern found."
program = False
|
<commit_before><commit_msg>Add Exploit Dev Pattern Create<commit_after>import sys, string, re
arguments = []
textString = []
program = True
number = ['a','a','a']
def get_pattern(stuff):
first = "abcdefghijklmnopqrstuvwxyz0123456789"
next = "bcdefghijklmnopqrstuvwxyz0123456789a"
table = string.maketrans(first, next)
textString.append("".join(number))
for run in range(stuff-1):
if number[2] == '9':
nextNumber = string.translate(number[1], table)
number[1] = nextNumber
if number[1] == '9' and number[2] == '9':
nextNumber = string.translate(number[0], table)
number[0] = nextNumber
nextNumber = string.translate(number[2], table)
number[2] = nextNumber
textString.append("".join(number))
return textString
while program:
for arg in sys.argv:
if len(sys.argv) != 3:
print "You must put two arguments:"
print "'make' for make pattern, and byte length"
print "or 'find' for find displacement, and byte pattern found."
program = False
break
else:
arguments.append(arg)
if program and arguments[1] == 'make':
bytes = int(round((int(arguments[2]) / 3) + 0.5))
strLen = int(arguments[2])
myFile = open('pattern.txt', 'w')
tempString = ("".join(get_pattern(bytes)))
if len(tempString) > strLen:
chop = len(tempString) - strLen
myFile.write(tempString[:-chop])
myFile.close()
print "Your pattern has been made."
print "See the 'pattern.txt' file in this directory."
program = False
elif program and arguments[1] == 'find':
bytes = arguments[2]
myFile = open('pattern.txt', 'r')
testString = myFile.read()
marker = [m.start(0) for m in re.finditer(bytes, testString)]
print 'The first character in your string is the {0}th number in the file'.format(marker[0] + 1)
program = False
elif program:
print "Your input was not understood."
print "You must put two arguments:"
print "'make' for make pattern, and byte length"
print "or 'find' for find byte offset, and byte pattern found."
program = False
|
|
d2a25e14c9f09139f7d7279465afc34f321902c6
|
smartpy/__init__.py
|
smartpy/__init__.py
|
from .interfaces.model import Model
from .interfaces.dataset import Dataset
from .trainer import Trainer
import tasks.tasks as tasks
|
from .interfaces.model import Model
from .interfaces.dataset import Dataset
from .trainer import Trainer
from .tasks import tasks
|
Use relative import to import tasks
|
Use relative import to import tasks
|
Python
|
bsd-3-clause
|
MarcCote/smartlearner,SMART-Lab/smartlearner,SMART-Lab/smartpy,havaeimo/smartlearner,ASalvail/smartlearner
|
from .interfaces.model import Model
from .interfaces.dataset import Dataset
from .trainer import Trainer
import tasks.tasks as tasks
Use relative import to import tasks
|
from .interfaces.model import Model
from .interfaces.dataset import Dataset
from .trainer import Trainer
from .tasks import tasks
|
<commit_before>from .interfaces.model import Model
from .interfaces.dataset import Dataset
from .trainer import Trainer
import tasks.tasks as tasks
<commit_msg>Use relative import to import tasks<commit_after>
|
from .interfaces.model import Model
from .interfaces.dataset import Dataset
from .trainer import Trainer
from .tasks import tasks
|
from .interfaces.model import Model
from .interfaces.dataset import Dataset
from .trainer import Trainer
import tasks.tasks as tasks
Use relative import to import tasksfrom .interfaces.model import Model
from .interfaces.dataset import Dataset
from .trainer import Trainer
from .tasks import tasks
|
<commit_before>from .interfaces.model import Model
from .interfaces.dataset import Dataset
from .trainer import Trainer
import tasks.tasks as tasks
<commit_msg>Use relative import to import tasks<commit_after>from .interfaces.model import Model
from .interfaces.dataset import Dataset
from .trainer import Trainer
from .tasks import tasks
|
0244217c57686d53e7a0aebef4d0dd328cf809da
|
trunk/examples/mesonet_oban.py
|
trunk/examples/mesonet_oban.py
|
import scipy.constants as sconsts
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
from mpl_toolkits.basemap import Basemap
from metpy import read_mesonet_data, dewpoint, get_wind_components
from metpy.constants import C2F
from metpy.cbook import append_fields
from metpy.vis import station_plot
from metpy.tools.oban import gaussian_filter
# stereographic projection
data = read_mesonet_data('data/200811210030.mdf',
fields=('STID', 'TIME', 'TAIR', 'RELH', 'WSPD', 'WDIR'))
#Calculate dewpoint in F from relative humidity and temperature
dewpt = C2F(dewpoint(data['TAIR'], data['RELH']/100.))
data = append_fields(data, ('dewpoint',), (dewpt,))
#Convert temperature and dewpoint to Fahrenheit
data['TAIR'] = C2F(data['TAIR'])
#Convert wind speeds to MPH
data['WSPD'] *= sconsts.hour / sconsts.mile
u,v = get_wind_components(data['WSPD'], data['WDIR'])
data = append_fields(data, ('u', 'v'), (u, v))
fig = plt.figure(figsize=(20,12))
ax = fig.add_subplot(1,1,1)
m = Basemap(lon_0=-99, lat_0=35, lat_ts=35, resolution='i',
projection='stere', urcrnrlat=37., urcrnrlon=-94.25, llcrnrlat=33.7,
llcrnrlon=-103., ax=ax)
m.bluemarble()
#Objectively analyze dewpoint
lon_grid, lat_grid, x_grid, y_grid = m.makegrid(125, 50, returnxy=True)
x,y = m(data['longitude'], data['latitude'])
dew_grid = griddata(x, y, data['dewpoint'], x_grid, y_grid)
dew_grid = gaussian_filter(x_grid.T, y_grid.T, dew_grid.T, 10000, 10000)
plt.pcolor(x_grid.T, y_grid.T, dew_grid, zorder=0, cmap=plt.get_cmap('Greens'),
antialiased=False)
station_plot(data, ax=ax, proj=m,
styles=dict(dewpoint=dict(color='lightgreen')), zorder=10)
m.drawstates(ax=ax, zorder=1)
plt.title(data['datetime'][0].strftime('%H%MZ %d %b %Y'))
plt.show()
|
Add an example showing how to use some of the oban functions.
|
Add an example showing how to use some of the oban functions.
git-svn-id: acf0ef94bfce630b1a882387fc03ab8593ec6522@295 150532fb-1d5b-0410-a8ab-efec50f980d4
|
Python
|
bsd-3-clause
|
deeplycloudy/MetPy,jrleeman/MetPy,jrleeman/MetPy,ahaberlie/MetPy,dopplershift/MetPy,ahill818/MetPy,ahaberlie/MetPy,Unidata/MetPy,Unidata/MetPy,dopplershift/MetPy,ShawnMurd/MetPy
|
Add an example showing how to use some of the oban functions.
git-svn-id: acf0ef94bfce630b1a882387fc03ab8593ec6522@295 150532fb-1d5b-0410-a8ab-efec50f980d4
|
import scipy.constants as sconsts
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
from mpl_toolkits.basemap import Basemap
from metpy import read_mesonet_data, dewpoint, get_wind_components
from metpy.constants import C2F
from metpy.cbook import append_fields
from metpy.vis import station_plot
from metpy.tools.oban import gaussian_filter
# stereographic projection
data = read_mesonet_data('data/200811210030.mdf',
fields=('STID', 'TIME', 'TAIR', 'RELH', 'WSPD', 'WDIR'))
#Calculate dewpoint in F from relative humidity and temperature
dewpt = C2F(dewpoint(data['TAIR'], data['RELH']/100.))
data = append_fields(data, ('dewpoint',), (dewpt,))
#Convert temperature and dewpoint to Fahrenheit
data['TAIR'] = C2F(data['TAIR'])
#Convert wind speeds to MPH
data['WSPD'] *= sconsts.hour / sconsts.mile
u,v = get_wind_components(data['WSPD'], data['WDIR'])
data = append_fields(data, ('u', 'v'), (u, v))
fig = plt.figure(figsize=(20,12))
ax = fig.add_subplot(1,1,1)
m = Basemap(lon_0=-99, lat_0=35, lat_ts=35, resolution='i',
projection='stere', urcrnrlat=37., urcrnrlon=-94.25, llcrnrlat=33.7,
llcrnrlon=-103., ax=ax)
m.bluemarble()
#Objectively analyze dewpoint
lon_grid, lat_grid, x_grid, y_grid = m.makegrid(125, 50, returnxy=True)
x,y = m(data['longitude'], data['latitude'])
dew_grid = griddata(x, y, data['dewpoint'], x_grid, y_grid)
dew_grid = gaussian_filter(x_grid.T, y_grid.T, dew_grid.T, 10000, 10000)
plt.pcolor(x_grid.T, y_grid.T, dew_grid, zorder=0, cmap=plt.get_cmap('Greens'),
antialiased=False)
station_plot(data, ax=ax, proj=m,
styles=dict(dewpoint=dict(color='lightgreen')), zorder=10)
m.drawstates(ax=ax, zorder=1)
plt.title(data['datetime'][0].strftime('%H%MZ %d %b %Y'))
plt.show()
|
<commit_before><commit_msg>Add an example showing how to use some of the oban functions.
git-svn-id: acf0ef94bfce630b1a882387fc03ab8593ec6522@295 150532fb-1d5b-0410-a8ab-efec50f980d4<commit_after>
|
import scipy.constants as sconsts
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
from mpl_toolkits.basemap import Basemap
from metpy import read_mesonet_data, dewpoint, get_wind_components
from metpy.constants import C2F
from metpy.cbook import append_fields
from metpy.vis import station_plot
from metpy.tools.oban import gaussian_filter
# stereographic projection
data = read_mesonet_data('data/200811210030.mdf',
fields=('STID', 'TIME', 'TAIR', 'RELH', 'WSPD', 'WDIR'))
#Calculate dewpoint in F from relative humidity and temperature
dewpt = C2F(dewpoint(data['TAIR'], data['RELH']/100.))
data = append_fields(data, ('dewpoint',), (dewpt,))
#Convert temperature and dewpoint to Fahrenheit
data['TAIR'] = C2F(data['TAIR'])
#Convert wind speeds to MPH
data['WSPD'] *= sconsts.hour / sconsts.mile
u,v = get_wind_components(data['WSPD'], data['WDIR'])
data = append_fields(data, ('u', 'v'), (u, v))
fig = plt.figure(figsize=(20,12))
ax = fig.add_subplot(1,1,1)
m = Basemap(lon_0=-99, lat_0=35, lat_ts=35, resolution='i',
projection='stere', urcrnrlat=37., urcrnrlon=-94.25, llcrnrlat=33.7,
llcrnrlon=-103., ax=ax)
m.bluemarble()
#Objectively analyze dewpoint
lon_grid, lat_grid, x_grid, y_grid = m.makegrid(125, 50, returnxy=True)
x,y = m(data['longitude'], data['latitude'])
dew_grid = griddata(x, y, data['dewpoint'], x_grid, y_grid)
dew_grid = gaussian_filter(x_grid.T, y_grid.T, dew_grid.T, 10000, 10000)
plt.pcolor(x_grid.T, y_grid.T, dew_grid, zorder=0, cmap=plt.get_cmap('Greens'),
antialiased=False)
station_plot(data, ax=ax, proj=m,
styles=dict(dewpoint=dict(color='lightgreen')), zorder=10)
m.drawstates(ax=ax, zorder=1)
plt.title(data['datetime'][0].strftime('%H%MZ %d %b %Y'))
plt.show()
|
Add an example showing how to use some of the oban functions.
git-svn-id: acf0ef94bfce630b1a882387fc03ab8593ec6522@295 150532fb-1d5b-0410-a8ab-efec50f980d4import scipy.constants as sconsts
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
from mpl_toolkits.basemap import Basemap
from metpy import read_mesonet_data, dewpoint, get_wind_components
from metpy.constants import C2F
from metpy.cbook import append_fields
from metpy.vis import station_plot
from metpy.tools.oban import gaussian_filter
# stereographic projection
data = read_mesonet_data('data/200811210030.mdf',
fields=('STID', 'TIME', 'TAIR', 'RELH', 'WSPD', 'WDIR'))
#Calculate dewpoint in F from relative humidity and temperature
dewpt = C2F(dewpoint(data['TAIR'], data['RELH']/100.))
data = append_fields(data, ('dewpoint',), (dewpt,))
#Convert temperature and dewpoint to Fahrenheit
data['TAIR'] = C2F(data['TAIR'])
#Convert wind speeds to MPH
data['WSPD'] *= sconsts.hour / sconsts.mile
u,v = get_wind_components(data['WSPD'], data['WDIR'])
data = append_fields(data, ('u', 'v'), (u, v))
fig = plt.figure(figsize=(20,12))
ax = fig.add_subplot(1,1,1)
m = Basemap(lon_0=-99, lat_0=35, lat_ts=35, resolution='i',
projection='stere', urcrnrlat=37., urcrnrlon=-94.25, llcrnrlat=33.7,
llcrnrlon=-103., ax=ax)
m.bluemarble()
#Objectively analyze dewpoint
lon_grid, lat_grid, x_grid, y_grid = m.makegrid(125, 50, returnxy=True)
x,y = m(data['longitude'], data['latitude'])
dew_grid = griddata(x, y, data['dewpoint'], x_grid, y_grid)
dew_grid = gaussian_filter(x_grid.T, y_grid.T, dew_grid.T, 10000, 10000)
plt.pcolor(x_grid.T, y_grid.T, dew_grid, zorder=0, cmap=plt.get_cmap('Greens'),
antialiased=False)
station_plot(data, ax=ax, proj=m,
styles=dict(dewpoint=dict(color='lightgreen')), zorder=10)
m.drawstates(ax=ax, zorder=1)
plt.title(data['datetime'][0].strftime('%H%MZ %d %b %Y'))
plt.show()
|
<commit_before><commit_msg>Add an example showing how to use some of the oban functions.
git-svn-id: acf0ef94bfce630b1a882387fc03ab8593ec6522@295 150532fb-1d5b-0410-a8ab-efec50f980d4<commit_after>import scipy.constants as sconsts
import matplotlib.pyplot as plt
from matplotlib.mlab import griddata
from mpl_toolkits.basemap import Basemap
from metpy import read_mesonet_data, dewpoint, get_wind_components
from metpy.constants import C2F
from metpy.cbook import append_fields
from metpy.vis import station_plot
from metpy.tools.oban import gaussian_filter
# stereographic projection
data = read_mesonet_data('data/200811210030.mdf',
fields=('STID', 'TIME', 'TAIR', 'RELH', 'WSPD', 'WDIR'))
#Calculate dewpoint in F from relative humidity and temperature
dewpt = C2F(dewpoint(data['TAIR'], data['RELH']/100.))
data = append_fields(data, ('dewpoint',), (dewpt,))
#Convert temperature and dewpoint to Fahrenheit
data['TAIR'] = C2F(data['TAIR'])
#Convert wind speeds to MPH
data['WSPD'] *= sconsts.hour / sconsts.mile
u,v = get_wind_components(data['WSPD'], data['WDIR'])
data = append_fields(data, ('u', 'v'), (u, v))
fig = plt.figure(figsize=(20,12))
ax = fig.add_subplot(1,1,1)
m = Basemap(lon_0=-99, lat_0=35, lat_ts=35, resolution='i',
projection='stere', urcrnrlat=37., urcrnrlon=-94.25, llcrnrlat=33.7,
llcrnrlon=-103., ax=ax)
m.bluemarble()
#Objectively analyze dewpoint
lon_grid, lat_grid, x_grid, y_grid = m.makegrid(125, 50, returnxy=True)
x,y = m(data['longitude'], data['latitude'])
dew_grid = griddata(x, y, data['dewpoint'], x_grid, y_grid)
dew_grid = gaussian_filter(x_grid.T, y_grid.T, dew_grid.T, 10000, 10000)
plt.pcolor(x_grid.T, y_grid.T, dew_grid, zorder=0, cmap=plt.get_cmap('Greens'),
antialiased=False)
station_plot(data, ax=ax, proj=m,
styles=dict(dewpoint=dict(color='lightgreen')), zorder=10)
m.drawstates(ax=ax, zorder=1)
plt.title(data['datetime'][0].strftime('%H%MZ %d %b %Y'))
plt.show()
|
|
c9d0b1522da83305dcfdfc82c28f2032162c0998
|
tests/test_compat.py
|
tests/test_compat.py
|
import pytest
from attr._compat import metadata_proxy
@pytest.fixture(name="mp")
def _mp():
return metadata_proxy({"x": 42, "y": "foo"})
class TestMetadataProxy:
"""
Ensure properties of metadata_proxy independently of hypothesis strategies.
"""
def test_repr(self, mp):
"""
repr makes sense and is consistent across Python versions.
"""
assert any(
[
"mappingproxy({'x': 42, 'y': 'foo'})" == repr(mp),
"mappingproxy({'y': 'foo', 'x': 42})" == repr(mp),
]
)
def test_immutable(self, mp):
"""
All mutating methods raise errors.
"""
with pytest.raises(TypeError, match="not support item assignment"):
mp["z"] = 23
with pytest.raises(TypeError, match="not support item deletion"):
del mp["x"]
with pytest.raises(AttributeError, match="no attribute 'update'"):
mp.update({})
with pytest.raises(AttributeError, match="no attribute 'clear'"):
mp.clear()
with pytest.raises(AttributeError, match="no attribute 'pop'"):
mp.pop("x")
with pytest.raises(AttributeError, match="no attribute 'popitem'"):
mp.popitem("x")
with pytest.raises(AttributeError, match="no attribute 'setdefault'"):
mp.setdefault("x")
|
Test metadata_proxy properties independently from hypothesis strategies
|
Test metadata_proxy properties independently from hypothesis strategies
Occasionally they fail to cover all bases and break our coverage job on Python 2.7.
Signed-off-by: Hynek Schlawack <6a253031d5fc83cdcd7910737760a89b7bcfb849@ox.cx>
|
Python
|
mit
|
python-attrs/attrs
|
Test metadata_proxy properties independently from hypothesis strategies
Occasionally they fail to cover all bases and break our coverage job on Python 2.7.
Signed-off-by: Hynek Schlawack <6a253031d5fc83cdcd7910737760a89b7bcfb849@ox.cx>
|
import pytest
from attr._compat import metadata_proxy
@pytest.fixture(name="mp")
def _mp():
return metadata_proxy({"x": 42, "y": "foo"})
class TestMetadataProxy:
"""
Ensure properties of metadata_proxy independently of hypothesis strategies.
"""
def test_repr(self, mp):
"""
repr makes sense and is consistent across Python versions.
"""
assert any(
[
"mappingproxy({'x': 42, 'y': 'foo'})" == repr(mp),
"mappingproxy({'y': 'foo', 'x': 42})" == repr(mp),
]
)
def test_immutable(self, mp):
"""
All mutating methods raise errors.
"""
with pytest.raises(TypeError, match="not support item assignment"):
mp["z"] = 23
with pytest.raises(TypeError, match="not support item deletion"):
del mp["x"]
with pytest.raises(AttributeError, match="no attribute 'update'"):
mp.update({})
with pytest.raises(AttributeError, match="no attribute 'clear'"):
mp.clear()
with pytest.raises(AttributeError, match="no attribute 'pop'"):
mp.pop("x")
with pytest.raises(AttributeError, match="no attribute 'popitem'"):
mp.popitem("x")
with pytest.raises(AttributeError, match="no attribute 'setdefault'"):
mp.setdefault("x")
|
<commit_before><commit_msg>Test metadata_proxy properties independently from hypothesis strategies
Occasionally they fail to cover all bases and break our coverage job on Python 2.7.
Signed-off-by: Hynek Schlawack <6a253031d5fc83cdcd7910737760a89b7bcfb849@ox.cx><commit_after>
|
import pytest
from attr._compat import metadata_proxy
@pytest.fixture(name="mp")
def _mp():
return metadata_proxy({"x": 42, "y": "foo"})
class TestMetadataProxy:
"""
Ensure properties of metadata_proxy independently of hypothesis strategies.
"""
def test_repr(self, mp):
"""
repr makes sense and is consistent across Python versions.
"""
assert any(
[
"mappingproxy({'x': 42, 'y': 'foo'})" == repr(mp),
"mappingproxy({'y': 'foo', 'x': 42})" == repr(mp),
]
)
def test_immutable(self, mp):
"""
All mutating methods raise errors.
"""
with pytest.raises(TypeError, match="not support item assignment"):
mp["z"] = 23
with pytest.raises(TypeError, match="not support item deletion"):
del mp["x"]
with pytest.raises(AttributeError, match="no attribute 'update'"):
mp.update({})
with pytest.raises(AttributeError, match="no attribute 'clear'"):
mp.clear()
with pytest.raises(AttributeError, match="no attribute 'pop'"):
mp.pop("x")
with pytest.raises(AttributeError, match="no attribute 'popitem'"):
mp.popitem("x")
with pytest.raises(AttributeError, match="no attribute 'setdefault'"):
mp.setdefault("x")
|
Test metadata_proxy properties independently from hypothesis strategies
Occasionally they fail to cover all bases and break our coverage job on Python 2.7.
Signed-off-by: Hynek Schlawack <6a253031d5fc83cdcd7910737760a89b7bcfb849@ox.cx>import pytest
from attr._compat import metadata_proxy
@pytest.fixture(name="mp")
def _mp():
return metadata_proxy({"x": 42, "y": "foo"})
class TestMetadataProxy:
"""
Ensure properties of metadata_proxy independently of hypothesis strategies.
"""
def test_repr(self, mp):
"""
repr makes sense and is consistent across Python versions.
"""
assert any(
[
"mappingproxy({'x': 42, 'y': 'foo'})" == repr(mp),
"mappingproxy({'y': 'foo', 'x': 42})" == repr(mp),
]
)
def test_immutable(self, mp):
"""
All mutating methods raise errors.
"""
with pytest.raises(TypeError, match="not support item assignment"):
mp["z"] = 23
with pytest.raises(TypeError, match="not support item deletion"):
del mp["x"]
with pytest.raises(AttributeError, match="no attribute 'update'"):
mp.update({})
with pytest.raises(AttributeError, match="no attribute 'clear'"):
mp.clear()
with pytest.raises(AttributeError, match="no attribute 'pop'"):
mp.pop("x")
with pytest.raises(AttributeError, match="no attribute 'popitem'"):
mp.popitem("x")
with pytest.raises(AttributeError, match="no attribute 'setdefault'"):
mp.setdefault("x")
|
<commit_before><commit_msg>Test metadata_proxy properties independently from hypothesis strategies
Occasionally they fail to cover all bases and break our coverage job on Python 2.7.
Signed-off-by: Hynek Schlawack <6a253031d5fc83cdcd7910737760a89b7bcfb849@ox.cx><commit_after>import pytest
from attr._compat import metadata_proxy
@pytest.fixture(name="mp")
def _mp():
return metadata_proxy({"x": 42, "y": "foo"})
class TestMetadataProxy:
"""
Ensure properties of metadata_proxy independently of hypothesis strategies.
"""
def test_repr(self, mp):
"""
repr makes sense and is consistent across Python versions.
"""
assert any(
[
"mappingproxy({'x': 42, 'y': 'foo'})" == repr(mp),
"mappingproxy({'y': 'foo', 'x': 42})" == repr(mp),
]
)
def test_immutable(self, mp):
"""
All mutating methods raise errors.
"""
with pytest.raises(TypeError, match="not support item assignment"):
mp["z"] = 23
with pytest.raises(TypeError, match="not support item deletion"):
del mp["x"]
with pytest.raises(AttributeError, match="no attribute 'update'"):
mp.update({})
with pytest.raises(AttributeError, match="no attribute 'clear'"):
mp.clear()
with pytest.raises(AttributeError, match="no attribute 'pop'"):
mp.pop("x")
with pytest.raises(AttributeError, match="no attribute 'popitem'"):
mp.popitem("x")
with pytest.raises(AttributeError, match="no attribute 'setdefault'"):
mp.setdefault("x")
|
|
af9f12cec4f187cac079360b4860c056cca014ef
|
tilezilla/db/_api.py
|
tilezilla/db/_api.py
|
from tilezilla.db.sqlite.tables import Base, TileSpec, Tile
if __name__ == '__main__':
from tilezilla import tilespec, products, stores
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# engine = create_engine('sqlite:///:memory:', echo=True)
engine = create_engine('sqlite:///testing.db', echo=True)
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
# Create the tile specification
weld_conus = tilespec.TILESPECS['WELD_CONUS']
sql_weld_conus = TileSpec.from_class(weld_conus)
session.add(sql_weld_conus)
session.commit()
# Create a product
prod = products.ESPALandsat('tests/data/LT50120312002300-SC20151009172149_EPSG5070/')
# Find and add tiles for product
tiles = weld_conus.bounds_to_tile(prod.bounding_box(weld_conus.crs))
for tile in tiles:
sql_tile = Tile(ref_tilespec_id=sql_weld_conus.id,
horizontal=tile.horizontal,
vertical=tile.vertical,
hv='h{}v{}'.format(tile.horizontal, tile.vertical))
session.add(sql_tile)
session.commit()
from IPython.core.debugger import Pdb; Pdb().set_trace()
|
Add demo code for later
|
Add demo code for later
|
Python
|
bsd-3-clause
|
ceholden/landsat_tiles,ceholden/landsat_tiles,ceholden/landsat_tile,ceholden/landsat_tile,ceholden/tilezilla
|
Add demo code for later
|
from tilezilla.db.sqlite.tables import Base, TileSpec, Tile
if __name__ == '__main__':
from tilezilla import tilespec, products, stores
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# engine = create_engine('sqlite:///:memory:', echo=True)
engine = create_engine('sqlite:///testing.db', echo=True)
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
# Create the tile specification
weld_conus = tilespec.TILESPECS['WELD_CONUS']
sql_weld_conus = TileSpec.from_class(weld_conus)
session.add(sql_weld_conus)
session.commit()
# Create a product
prod = products.ESPALandsat('tests/data/LT50120312002300-SC20151009172149_EPSG5070/')
# Find and add tiles for product
tiles = weld_conus.bounds_to_tile(prod.bounding_box(weld_conus.crs))
for tile in tiles:
sql_tile = Tile(ref_tilespec_id=sql_weld_conus.id,
horizontal=tile.horizontal,
vertical=tile.vertical,
hv='h{}v{}'.format(tile.horizontal, tile.vertical))
session.add(sql_tile)
session.commit()
from IPython.core.debugger import Pdb; Pdb().set_trace()
|
<commit_before><commit_msg>Add demo code for later<commit_after>
|
from tilezilla.db.sqlite.tables import Base, TileSpec, Tile
if __name__ == '__main__':
from tilezilla import tilespec, products, stores
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# engine = create_engine('sqlite:///:memory:', echo=True)
engine = create_engine('sqlite:///testing.db', echo=True)
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
# Create the tile specification
weld_conus = tilespec.TILESPECS['WELD_CONUS']
sql_weld_conus = TileSpec.from_class(weld_conus)
session.add(sql_weld_conus)
session.commit()
# Create a product
prod = products.ESPALandsat('tests/data/LT50120312002300-SC20151009172149_EPSG5070/')
# Find and add tiles for product
tiles = weld_conus.bounds_to_tile(prod.bounding_box(weld_conus.crs))
for tile in tiles:
sql_tile = Tile(ref_tilespec_id=sql_weld_conus.id,
horizontal=tile.horizontal,
vertical=tile.vertical,
hv='h{}v{}'.format(tile.horizontal, tile.vertical))
session.add(sql_tile)
session.commit()
from IPython.core.debugger import Pdb; Pdb().set_trace()
|
Add demo code for laterfrom tilezilla.db.sqlite.tables import Base, TileSpec, Tile
if __name__ == '__main__':
from tilezilla import tilespec, products, stores
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# engine = create_engine('sqlite:///:memory:', echo=True)
engine = create_engine('sqlite:///testing.db', echo=True)
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
# Create the tile specification
weld_conus = tilespec.TILESPECS['WELD_CONUS']
sql_weld_conus = TileSpec.from_class(weld_conus)
session.add(sql_weld_conus)
session.commit()
# Create a product
prod = products.ESPALandsat('tests/data/LT50120312002300-SC20151009172149_EPSG5070/')
# Find and add tiles for product
tiles = weld_conus.bounds_to_tile(prod.bounding_box(weld_conus.crs))
for tile in tiles:
sql_tile = Tile(ref_tilespec_id=sql_weld_conus.id,
horizontal=tile.horizontal,
vertical=tile.vertical,
hv='h{}v{}'.format(tile.horizontal, tile.vertical))
session.add(sql_tile)
session.commit()
from IPython.core.debugger import Pdb; Pdb().set_trace()
|
<commit_before><commit_msg>Add demo code for later<commit_after>from tilezilla.db.sqlite.tables import Base, TileSpec, Tile
if __name__ == '__main__':
from tilezilla import tilespec, products, stores
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# engine = create_engine('sqlite:///:memory:', echo=True)
engine = create_engine('sqlite:///testing.db', echo=True)
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
# Create the tile specification
weld_conus = tilespec.TILESPECS['WELD_CONUS']
sql_weld_conus = TileSpec.from_class(weld_conus)
session.add(sql_weld_conus)
session.commit()
# Create a product
prod = products.ESPALandsat('tests/data/LT50120312002300-SC20151009172149_EPSG5070/')
# Find and add tiles for product
tiles = weld_conus.bounds_to_tile(prod.bounding_box(weld_conus.crs))
for tile in tiles:
sql_tile = Tile(ref_tilespec_id=sql_weld_conus.id,
horizontal=tile.horizontal,
vertical=tile.vertical,
hv='h{}v{}'.format(tile.horizontal, tile.vertical))
session.add(sql_tile)
session.commit()
from IPython.core.debugger import Pdb; Pdb().set_trace()
|
|
1d7c57d2d9346b396f836e39c188c2c0a2a1797b
|
tests/test_authtkt.py
|
tests/test_authtkt.py
|
import unittest
from yocommon.util import configure
import authtkt
class AuthTktTests(unittest.TestCase):
def setUp(self):
self.secret = 'hmac secret'
self.crypto_secret = 'top secret'
self.cookie = (
'MjQ4MWQ3ODEzNGI2MjE3N2I4OGQ4MDRjNTZkY2YxZGU1MWRkNjY3NjEyMzQ1Njc4O'
'TAhdGVzdCFnTGpIMGluemJTZTZHYzNaU2J6S1ljWTZkUlZ5VXloR0pWMWxpWXBYY3'
'RmTjAxQlRWZFRlOS96M0g0dmEyOUFEWFlDLytFNFpDTmQ5S09OSlBJZnFoWUNnNlF'
'DYWFDODM3NXdjS1RTbERVUlVUZlV6TUxWZHRZSVpic0JxeGxuRDgyWHBjeE9ORjZB'
'Y3pvQjlkNkU0N0xScGVVNjNmUTFpdFhOcFkwRExyUG8xdnlGWUtxZHNRTHBYUHc9P'
'Q==')
configure(SECRET=self.secret)
configure(CRYPTO_SECRET=self.crypto_secret)
def test_get_ticket_data_returns_proper_data(self):
data = authtkt.get_ticket_data(self.cookie)
self.assertTrue(all(
[key in data for key in (
'surname', 'name', 'id', 'tokens')]
))
def test_valid_ticket_validates_correctly(self):
self.assertTrue(authtkt.validate(self.cookie, self.secret))
def test_invalid_ticket_does_not_validate(self):
self.assertFalse(authtkt.validate('I am a banana', self.secret))
|
Add tests (Inspired by yocommon)
|
Add tests (Inspired by yocommon)
|
Python
|
mit
|
yola/auth_tkt
|
Add tests (Inspired by yocommon)
|
import unittest
from yocommon.util import configure
import authtkt
class AuthTktTests(unittest.TestCase):
def setUp(self):
self.secret = 'hmac secret'
self.crypto_secret = 'top secret'
self.cookie = (
'MjQ4MWQ3ODEzNGI2MjE3N2I4OGQ4MDRjNTZkY2YxZGU1MWRkNjY3NjEyMzQ1Njc4O'
'TAhdGVzdCFnTGpIMGluemJTZTZHYzNaU2J6S1ljWTZkUlZ5VXloR0pWMWxpWXBYY3'
'RmTjAxQlRWZFRlOS96M0g0dmEyOUFEWFlDLytFNFpDTmQ5S09OSlBJZnFoWUNnNlF'
'DYWFDODM3NXdjS1RTbERVUlVUZlV6TUxWZHRZSVpic0JxeGxuRDgyWHBjeE9ORjZB'
'Y3pvQjlkNkU0N0xScGVVNjNmUTFpdFhOcFkwRExyUG8xdnlGWUtxZHNRTHBYUHc9P'
'Q==')
configure(SECRET=self.secret)
configure(CRYPTO_SECRET=self.crypto_secret)
def test_get_ticket_data_returns_proper_data(self):
data = authtkt.get_ticket_data(self.cookie)
self.assertTrue(all(
[key in data for key in (
'surname', 'name', 'id', 'tokens')]
))
def test_valid_ticket_validates_correctly(self):
self.assertTrue(authtkt.validate(self.cookie, self.secret))
def test_invalid_ticket_does_not_validate(self):
self.assertFalse(authtkt.validate('I am a banana', self.secret))
|
<commit_before><commit_msg>Add tests (Inspired by yocommon)<commit_after>
|
import unittest
from yocommon.util import configure
import authtkt
class AuthTktTests(unittest.TestCase):
def setUp(self):
self.secret = 'hmac secret'
self.crypto_secret = 'top secret'
self.cookie = (
'MjQ4MWQ3ODEzNGI2MjE3N2I4OGQ4MDRjNTZkY2YxZGU1MWRkNjY3NjEyMzQ1Njc4O'
'TAhdGVzdCFnTGpIMGluemJTZTZHYzNaU2J6S1ljWTZkUlZ5VXloR0pWMWxpWXBYY3'
'RmTjAxQlRWZFRlOS96M0g0dmEyOUFEWFlDLytFNFpDTmQ5S09OSlBJZnFoWUNnNlF'
'DYWFDODM3NXdjS1RTbERVUlVUZlV6TUxWZHRZSVpic0JxeGxuRDgyWHBjeE9ORjZB'
'Y3pvQjlkNkU0N0xScGVVNjNmUTFpdFhOcFkwRExyUG8xdnlGWUtxZHNRTHBYUHc9P'
'Q==')
configure(SECRET=self.secret)
configure(CRYPTO_SECRET=self.crypto_secret)
def test_get_ticket_data_returns_proper_data(self):
data = authtkt.get_ticket_data(self.cookie)
self.assertTrue(all(
[key in data for key in (
'surname', 'name', 'id', 'tokens')]
))
def test_valid_ticket_validates_correctly(self):
self.assertTrue(authtkt.validate(self.cookie, self.secret))
def test_invalid_ticket_does_not_validate(self):
self.assertFalse(authtkt.validate('I am a banana', self.secret))
|
Add tests (Inspired by yocommon)import unittest
from yocommon.util import configure
import authtkt
class AuthTktTests(unittest.TestCase):
def setUp(self):
self.secret = 'hmac secret'
self.crypto_secret = 'top secret'
self.cookie = (
'MjQ4MWQ3ODEzNGI2MjE3N2I4OGQ4MDRjNTZkY2YxZGU1MWRkNjY3NjEyMzQ1Njc4O'
'TAhdGVzdCFnTGpIMGluemJTZTZHYzNaU2J6S1ljWTZkUlZ5VXloR0pWMWxpWXBYY3'
'RmTjAxQlRWZFRlOS96M0g0dmEyOUFEWFlDLytFNFpDTmQ5S09OSlBJZnFoWUNnNlF'
'DYWFDODM3NXdjS1RTbERVUlVUZlV6TUxWZHRZSVpic0JxeGxuRDgyWHBjeE9ORjZB'
'Y3pvQjlkNkU0N0xScGVVNjNmUTFpdFhOcFkwRExyUG8xdnlGWUtxZHNRTHBYUHc9P'
'Q==')
configure(SECRET=self.secret)
configure(CRYPTO_SECRET=self.crypto_secret)
def test_get_ticket_data_returns_proper_data(self):
data = authtkt.get_ticket_data(self.cookie)
self.assertTrue(all(
[key in data for key in (
'surname', 'name', 'id', 'tokens')]
))
def test_valid_ticket_validates_correctly(self):
self.assertTrue(authtkt.validate(self.cookie, self.secret))
def test_invalid_ticket_does_not_validate(self):
self.assertFalse(authtkt.validate('I am a banana', self.secret))
|
<commit_before><commit_msg>Add tests (Inspired by yocommon)<commit_after>import unittest
from yocommon.util import configure
import authtkt
class AuthTktTests(unittest.TestCase):
def setUp(self):
self.secret = 'hmac secret'
self.crypto_secret = 'top secret'
self.cookie = (
'MjQ4MWQ3ODEzNGI2MjE3N2I4OGQ4MDRjNTZkY2YxZGU1MWRkNjY3NjEyMzQ1Njc4O'
'TAhdGVzdCFnTGpIMGluemJTZTZHYzNaU2J6S1ljWTZkUlZ5VXloR0pWMWxpWXBYY3'
'RmTjAxQlRWZFRlOS96M0g0dmEyOUFEWFlDLytFNFpDTmQ5S09OSlBJZnFoWUNnNlF'
'DYWFDODM3NXdjS1RTbERVUlVUZlV6TUxWZHRZSVpic0JxeGxuRDgyWHBjeE9ORjZB'
'Y3pvQjlkNkU0N0xScGVVNjNmUTFpdFhOcFkwRExyUG8xdnlGWUtxZHNRTHBYUHc9P'
'Q==')
configure(SECRET=self.secret)
configure(CRYPTO_SECRET=self.crypto_secret)
def test_get_ticket_data_returns_proper_data(self):
data = authtkt.get_ticket_data(self.cookie)
self.assertTrue(all(
[key in data for key in (
'surname', 'name', 'id', 'tokens')]
))
def test_valid_ticket_validates_correctly(self):
self.assertTrue(authtkt.validate(self.cookie, self.secret))
def test_invalid_ticket_does_not_validate(self):
self.assertFalse(authtkt.validate('I am a banana', self.secret))
|
|
5dd8284d9f8b3891de74f22685270b058051c3f0
|
tests/test_inherit.py
|
tests/test_inherit.py
|
# -*- coding: utf-8 -*-
"""
Tests for inheritance in RegexLexer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
from pygments.lexer import RegexLexer, inherit
from pygments.token import Text
class InheritTest(unittest.TestCase):
def test_single_inheritance_position(self):
t = Two()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y'], pats)
def test_multi_inheritance_beginning(self):
t = Beginning()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y', 'm'], pats)
def test_multi_inheritance_end(self):
t = End()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['m', 'x', 'a', 'b', 'y'], pats)
def test_multi_inheritance_position(self):
t = Three()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['i', 'x', 'a', 'b', 'y', 'j'], pats)
def test_single_inheritance_with_skip(self):
t = Skipped()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y'], pats)
class One(RegexLexer):
tokens = {
'root': [
('a', Text),
('b', Text),
],
}
class Two(One):
tokens = {
'root': [
('x', Text),
inherit,
('y', Text),
],
}
class Three(Two):
tokens = {
'root': [
('i', Text),
inherit,
('j', Text),
],
}
class Beginning(Two):
tokens = {
'root': [
inherit,
('m', Text),
],
}
class End(Two):
tokens = {
'root': [
('m', Text),
inherit,
],
}
class Empty(One):
tokens = {}
class Skipped(Empty):
tokens = {
'root': [
('x', Text),
inherit,
('y', Text),
],
}
|
Add test for RegexLexer inheritance (fails with current code).
|
Add test for RegexLexer inheritance (fails with current code).
|
Python
|
bsd-2-clause
|
aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments,aswinpj/Pygments
|
Add test for RegexLexer inheritance (fails with current code).
|
# -*- coding: utf-8 -*-
"""
Tests for inheritance in RegexLexer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
from pygments.lexer import RegexLexer, inherit
from pygments.token import Text
class InheritTest(unittest.TestCase):
def test_single_inheritance_position(self):
t = Two()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y'], pats)
def test_multi_inheritance_beginning(self):
t = Beginning()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y', 'm'], pats)
def test_multi_inheritance_end(self):
t = End()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['m', 'x', 'a', 'b', 'y'], pats)
def test_multi_inheritance_position(self):
t = Three()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['i', 'x', 'a', 'b', 'y', 'j'], pats)
def test_single_inheritance_with_skip(self):
t = Skipped()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y'], pats)
class One(RegexLexer):
tokens = {
'root': [
('a', Text),
('b', Text),
],
}
class Two(One):
tokens = {
'root': [
('x', Text),
inherit,
('y', Text),
],
}
class Three(Two):
tokens = {
'root': [
('i', Text),
inherit,
('j', Text),
],
}
class Beginning(Two):
tokens = {
'root': [
inherit,
('m', Text),
],
}
class End(Two):
tokens = {
'root': [
('m', Text),
inherit,
],
}
class Empty(One):
tokens = {}
class Skipped(Empty):
tokens = {
'root': [
('x', Text),
inherit,
('y', Text),
],
}
|
<commit_before><commit_msg>Add test for RegexLexer inheritance (fails with current code).<commit_after>
|
# -*- coding: utf-8 -*-
"""
Tests for inheritance in RegexLexer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
from pygments.lexer import RegexLexer, inherit
from pygments.token import Text
class InheritTest(unittest.TestCase):
def test_single_inheritance_position(self):
t = Two()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y'], pats)
def test_multi_inheritance_beginning(self):
t = Beginning()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y', 'm'], pats)
def test_multi_inheritance_end(self):
t = End()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['m', 'x', 'a', 'b', 'y'], pats)
def test_multi_inheritance_position(self):
t = Three()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['i', 'x', 'a', 'b', 'y', 'j'], pats)
def test_single_inheritance_with_skip(self):
t = Skipped()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y'], pats)
class One(RegexLexer):
tokens = {
'root': [
('a', Text),
('b', Text),
],
}
class Two(One):
tokens = {
'root': [
('x', Text),
inherit,
('y', Text),
],
}
class Three(Two):
tokens = {
'root': [
('i', Text),
inherit,
('j', Text),
],
}
class Beginning(Two):
tokens = {
'root': [
inherit,
('m', Text),
],
}
class End(Two):
tokens = {
'root': [
('m', Text),
inherit,
],
}
class Empty(One):
tokens = {}
class Skipped(Empty):
tokens = {
'root': [
('x', Text),
inherit,
('y', Text),
],
}
|
Add test for RegexLexer inheritance (fails with current code).# -*- coding: utf-8 -*-
"""
Tests for inheritance in RegexLexer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
from pygments.lexer import RegexLexer, inherit
from pygments.token import Text
class InheritTest(unittest.TestCase):
def test_single_inheritance_position(self):
t = Two()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y'], pats)
def test_multi_inheritance_beginning(self):
t = Beginning()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y', 'm'], pats)
def test_multi_inheritance_end(self):
t = End()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['m', 'x', 'a', 'b', 'y'], pats)
def test_multi_inheritance_position(self):
t = Three()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['i', 'x', 'a', 'b', 'y', 'j'], pats)
def test_single_inheritance_with_skip(self):
t = Skipped()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y'], pats)
class One(RegexLexer):
tokens = {
'root': [
('a', Text),
('b', Text),
],
}
class Two(One):
tokens = {
'root': [
('x', Text),
inherit,
('y', Text),
],
}
class Three(Two):
tokens = {
'root': [
('i', Text),
inherit,
('j', Text),
],
}
class Beginning(Two):
tokens = {
'root': [
inherit,
('m', Text),
],
}
class End(Two):
tokens = {
'root': [
('m', Text),
inherit,
],
}
class Empty(One):
tokens = {}
class Skipped(Empty):
tokens = {
'root': [
('x', Text),
inherit,
('y', Text),
],
}
|
<commit_before><commit_msg>Add test for RegexLexer inheritance (fails with current code).<commit_after># -*- coding: utf-8 -*-
"""
Tests for inheritance in RegexLexer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
from pygments.lexer import RegexLexer, inherit
from pygments.token import Text
class InheritTest(unittest.TestCase):
def test_single_inheritance_position(self):
t = Two()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y'], pats)
def test_multi_inheritance_beginning(self):
t = Beginning()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y', 'm'], pats)
def test_multi_inheritance_end(self):
t = End()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['m', 'x', 'a', 'b', 'y'], pats)
def test_multi_inheritance_position(self):
t = Three()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['i', 'x', 'a', 'b', 'y', 'j'], pats)
def test_single_inheritance_with_skip(self):
t = Skipped()
pats = [x[0].__self__.pattern for x in t._tokens['root']]
self.assertEqual(['x', 'a', 'b', 'y'], pats)
class One(RegexLexer):
tokens = {
'root': [
('a', Text),
('b', Text),
],
}
class Two(One):
tokens = {
'root': [
('x', Text),
inherit,
('y', Text),
],
}
class Three(Two):
tokens = {
'root': [
('i', Text),
inherit,
('j', Text),
],
}
class Beginning(Two):
tokens = {
'root': [
inherit,
('m', Text),
],
}
class End(Two):
tokens = {
'root': [
('m', Text),
inherit,
],
}
class Empty(One):
tokens = {}
class Skipped(Empty):
tokens = {
'root': [
('x', Text),
inherit,
('y', Text),
],
}
|
|
b52db04e4f57e805b3ff9a1b9a5ae61eb1a152d0
|
rx-tests/rx-async-test-with-closure.py
|
rx-tests/rx-async-test-with-closure.py
|
#!/usr/bin/env python3
'''
Process two different streams in parallel,
retrieving them from the module's scope
'''
import rx
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import APIReaderTwitter as Twitter
try:
import json
except ImportError:
import simplejson as json
def is_not_delete(element):
return not "delete" in element
def is_delete(element):
return "delete" in element
def pretty_print(element):
print(json.dumps(element, indent=4))
twitter_stream = rx.Observable.from_(Twitter.get_iterable())
deleted_stream = twitter_stream.filter(is_not_delete)
tweet_stream = twitter_stream.filter(is_delete)
def process_deleted():
deleted_stream.subscribe(pretty_print)
def process_tweets():
tweet_stream.subscribe(pretty_print)
if __name__ == "__main__":
executor = ThreadPoolExecutor(2)
loop = asyncio.get_event_loop()
deleted = asyncio.async(loop.run_in_executor(executor, process_deleted))
tweets = asyncio.async(loop.run_in_executor(executor, process_tweets))
loop.run_forever()
|
Add example of processing two different streams in separate threads retrieving the streams from the module's global scope.
|
Add example of processing two different streams in separate threads
retrieving the streams from the module's global scope.
|
Python
|
mit
|
Pysellus/streaming-api-test,Pysellus/streaming-api-test
|
Add example of processing two different streams in separate threads
retrieving the streams from the module's global scope.
|
#!/usr/bin/env python3
'''
Process two different streams in parallel,
retrieving them from the module's scope
'''
import rx
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import APIReaderTwitter as Twitter
try:
import json
except ImportError:
import simplejson as json
def is_not_delete(element):
return not "delete" in element
def is_delete(element):
return "delete" in element
def pretty_print(element):
print(json.dumps(element, indent=4))
twitter_stream = rx.Observable.from_(Twitter.get_iterable())
deleted_stream = twitter_stream.filter(is_not_delete)
tweet_stream = twitter_stream.filter(is_delete)
def process_deleted():
deleted_stream.subscribe(pretty_print)
def process_tweets():
tweet_stream.subscribe(pretty_print)
if __name__ == "__main__":
executor = ThreadPoolExecutor(2)
loop = asyncio.get_event_loop()
deleted = asyncio.async(loop.run_in_executor(executor, process_deleted))
tweets = asyncio.async(loop.run_in_executor(executor, process_tweets))
loop.run_forever()
|
<commit_before><commit_msg>Add example of processing two different streams in separate threads
retrieving the streams from the module's global scope.<commit_after>
|
#!/usr/bin/env python3
'''
Process two different streams in parallel,
retrieving them from the module's scope
'''
import rx
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import APIReaderTwitter as Twitter
try:
import json
except ImportError:
import simplejson as json
def is_not_delete(element):
return not "delete" in element
def is_delete(element):
return "delete" in element
def pretty_print(element):
print(json.dumps(element, indent=4))
twitter_stream = rx.Observable.from_(Twitter.get_iterable())
deleted_stream = twitter_stream.filter(is_not_delete)
tweet_stream = twitter_stream.filter(is_delete)
def process_deleted():
deleted_stream.subscribe(pretty_print)
def process_tweets():
tweet_stream.subscribe(pretty_print)
if __name__ == "__main__":
executor = ThreadPoolExecutor(2)
loop = asyncio.get_event_loop()
deleted = asyncio.async(loop.run_in_executor(executor, process_deleted))
tweets = asyncio.async(loop.run_in_executor(executor, process_tweets))
loop.run_forever()
|
Add example of processing two different streams in separate threads
retrieving the streams from the module's global scope.#!/usr/bin/env python3
'''
Process two different streams in parallel,
retrieving them from the module's scope
'''
import rx
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import APIReaderTwitter as Twitter
try:
import json
except ImportError:
import simplejson as json
def is_not_delete(element):
return not "delete" in element
def is_delete(element):
return "delete" in element
def pretty_print(element):
print(json.dumps(element, indent=4))
twitter_stream = rx.Observable.from_(Twitter.get_iterable())
deleted_stream = twitter_stream.filter(is_not_delete)
tweet_stream = twitter_stream.filter(is_delete)
def process_deleted():
deleted_stream.subscribe(pretty_print)
def process_tweets():
tweet_stream.subscribe(pretty_print)
if __name__ == "__main__":
executor = ThreadPoolExecutor(2)
loop = asyncio.get_event_loop()
deleted = asyncio.async(loop.run_in_executor(executor, process_deleted))
tweets = asyncio.async(loop.run_in_executor(executor, process_tweets))
loop.run_forever()
|
<commit_before><commit_msg>Add example of processing two different streams in separate threads
retrieving the streams from the module's global scope.<commit_after>#!/usr/bin/env python3
'''
Process two different streams in parallel,
retrieving them from the module's scope
'''
import rx
import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import APIReaderTwitter as Twitter
try:
import json
except ImportError:
import simplejson as json
def is_not_delete(element):
return not "delete" in element
def is_delete(element):
return "delete" in element
def pretty_print(element):
print(json.dumps(element, indent=4))
twitter_stream = rx.Observable.from_(Twitter.get_iterable())
deleted_stream = twitter_stream.filter(is_not_delete)
tweet_stream = twitter_stream.filter(is_delete)
def process_deleted():
deleted_stream.subscribe(pretty_print)
def process_tweets():
tweet_stream.subscribe(pretty_print)
if __name__ == "__main__":
executor = ThreadPoolExecutor(2)
loop = asyncio.get_event_loop()
deleted = asyncio.async(loop.run_in_executor(executor, process_deleted))
tweets = asyncio.async(loop.run_in_executor(executor, process_tweets))
loop.run_forever()
|
|
658d92bf118a70f6aa50cc20f78468f7895e077a
|
examples/expose-application.py
|
examples/expose-application.py
|
"""
This example:
1. Connects to the current model.
2. Deploys a charm and waits until it reports itself active.
3. Demonstrates exposing application endpoints to space and CIDR combinations.
4. Demonstrates unexposing application endpoints.
NOTE: this test must be run against a 2.9 controller.
"""
from juju import loop
from juju.model import Model
from juju.application import ExposedEndpoint
import logging
async def main():
model = Model()
print('Connecting to model')
# connect to current model with current user, per Juju CLI
await model.connect()
try:
print('Deploying ubuntu')
application = await model.deploy(
'cs:~jameinel/ubuntu-lite-7',
application_name='ubuntu',
series='trusty',
channel='stable',
)
print('Waiting for active')
await model.block_until(
lambda: all(unit.workload_status == 'active'
for unit in application.units))
print('Expose all opened port ranges')
await application.expose()
print('Expose all opened port ranges to the CIDRs that correspond to a list of spaces')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"])
})
print('Expose all opened port ranges to a list of CIDRs')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_cidrs=["10.0.0.0/24"])
})
print('Expose all opened port ranges to a list of spaces and CIDRs')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"], to_cidrs=["10.0.0.0/24"])
})
print('Expose individual endpoints to different space/CIDR combinations')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"], to_cidrs=["10.0.0.0/24"]),
"ubuntu": ExposedEndpoint(to_cidrs=["10.42.42.0/24"])
})
print('Unexpose individual endpoints (other endpoints remain exposed)')
await application.unexpose(exposed_endpoints=["ubuntu"])
print('Unexpose application')
await application.unexpose()
print('Removing ubuntu')
await application.remove()
finally:
print('Disconnecting from model')
await model.disconnect()
if __name__ == '__main__':
loop.run(main())
|
Add example for the expose/unexpose methods
|
Add example for the expose/unexpose methods
|
Python
|
apache-2.0
|
juju/python-libjuju,juju/python-libjuju
|
Add example for the expose/unexpose methods
|
"""
This example:
1. Connects to the current model.
2. Deploys a charm and waits until it reports itself active.
3. Demonstrates exposing application endpoints to space and CIDR combinations.
4. Demonstrates unexposing application endpoints.
NOTE: this test must be run against a 2.9 controller.
"""
from juju import loop
from juju.model import Model
from juju.application import ExposedEndpoint
import logging
async def main():
model = Model()
print('Connecting to model')
# connect to current model with current user, per Juju CLI
await model.connect()
try:
print('Deploying ubuntu')
application = await model.deploy(
'cs:~jameinel/ubuntu-lite-7',
application_name='ubuntu',
series='trusty',
channel='stable',
)
print('Waiting for active')
await model.block_until(
lambda: all(unit.workload_status == 'active'
for unit in application.units))
print('Expose all opened port ranges')
await application.expose()
print('Expose all opened port ranges to the CIDRs that correspond to a list of spaces')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"])
})
print('Expose all opened port ranges to a list of CIDRs')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_cidrs=["10.0.0.0/24"])
})
print('Expose all opened port ranges to a list of spaces and CIDRs')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"], to_cidrs=["10.0.0.0/24"])
})
print('Expose individual endpoints to different space/CIDR combinations')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"], to_cidrs=["10.0.0.0/24"]),
"ubuntu": ExposedEndpoint(to_cidrs=["10.42.42.0/24"])
})
print('Unexpose individual endpoints (other endpoints remain exposed)')
await application.unexpose(exposed_endpoints=["ubuntu"])
print('Unexpose application')
await application.unexpose()
print('Removing ubuntu')
await application.remove()
finally:
print('Disconnecting from model')
await model.disconnect()
if __name__ == '__main__':
loop.run(main())
|
<commit_before><commit_msg>Add example for the expose/unexpose methods<commit_after>
|
"""
This example:
1. Connects to the current model.
2. Deploys a charm and waits until it reports itself active.
3. Demonstrates exposing application endpoints to space and CIDR combinations.
4. Demonstrates unexposing application endpoints.
NOTE: this test must be run against a 2.9 controller.
"""
from juju import loop
from juju.model import Model
from juju.application import ExposedEndpoint
import logging
async def main():
model = Model()
print('Connecting to model')
# connect to current model with current user, per Juju CLI
await model.connect()
try:
print('Deploying ubuntu')
application = await model.deploy(
'cs:~jameinel/ubuntu-lite-7',
application_name='ubuntu',
series='trusty',
channel='stable',
)
print('Waiting for active')
await model.block_until(
lambda: all(unit.workload_status == 'active'
for unit in application.units))
print('Expose all opened port ranges')
await application.expose()
print('Expose all opened port ranges to the CIDRs that correspond to a list of spaces')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"])
})
print('Expose all opened port ranges to a list of CIDRs')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_cidrs=["10.0.0.0/24"])
})
print('Expose all opened port ranges to a list of spaces and CIDRs')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"], to_cidrs=["10.0.0.0/24"])
})
print('Expose individual endpoints to different space/CIDR combinations')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"], to_cidrs=["10.0.0.0/24"]),
"ubuntu": ExposedEndpoint(to_cidrs=["10.42.42.0/24"])
})
print('Unexpose individual endpoints (other endpoints remain exposed)')
await application.unexpose(exposed_endpoints=["ubuntu"])
print('Unexpose application')
await application.unexpose()
print('Removing ubuntu')
await application.remove()
finally:
print('Disconnecting from model')
await model.disconnect()
if __name__ == '__main__':
loop.run(main())
|
Add example for the expose/unexpose methods"""
This example:
1. Connects to the current model.
2. Deploys a charm and waits until it reports itself active.
3. Demonstrates exposing application endpoints to space and CIDR combinations.
4. Demonstrates unexposing application endpoints.
NOTE: this test must be run against a 2.9 controller.
"""
from juju import loop
from juju.model import Model
from juju.application import ExposedEndpoint
import logging
async def main():
model = Model()
print('Connecting to model')
# connect to current model with current user, per Juju CLI
await model.connect()
try:
print('Deploying ubuntu')
application = await model.deploy(
'cs:~jameinel/ubuntu-lite-7',
application_name='ubuntu',
series='trusty',
channel='stable',
)
print('Waiting for active')
await model.block_until(
lambda: all(unit.workload_status == 'active'
for unit in application.units))
print('Expose all opened port ranges')
await application.expose()
print('Expose all opened port ranges to the CIDRs that correspond to a list of spaces')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"])
})
print('Expose all opened port ranges to a list of CIDRs')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_cidrs=["10.0.0.0/24"])
})
print('Expose all opened port ranges to a list of spaces and CIDRs')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"], to_cidrs=["10.0.0.0/24"])
})
print('Expose individual endpoints to different space/CIDR combinations')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"], to_cidrs=["10.0.0.0/24"]),
"ubuntu": ExposedEndpoint(to_cidrs=["10.42.42.0/24"])
})
print('Unexpose individual endpoints (other endpoints remain exposed)')
await application.unexpose(exposed_endpoints=["ubuntu"])
print('Unexpose application')
await application.unexpose()
print('Removing ubuntu')
await application.remove()
finally:
print('Disconnecting from model')
await model.disconnect()
if __name__ == '__main__':
loop.run(main())
|
<commit_before><commit_msg>Add example for the expose/unexpose methods<commit_after>"""
This example:
1. Connects to the current model.
2. Deploys a charm and waits until it reports itself active.
3. Demonstrates exposing application endpoints to space and CIDR combinations.
4. Demonstrates unexposing application endpoints.
NOTE: this test must be run against a 2.9 controller.
"""
from juju import loop
from juju.model import Model
from juju.application import ExposedEndpoint
import logging
async def main():
model = Model()
print('Connecting to model')
# connect to current model with current user, per Juju CLI
await model.connect()
try:
print('Deploying ubuntu')
application = await model.deploy(
'cs:~jameinel/ubuntu-lite-7',
application_name='ubuntu',
series='trusty',
channel='stable',
)
print('Waiting for active')
await model.block_until(
lambda: all(unit.workload_status == 'active'
for unit in application.units))
print('Expose all opened port ranges')
await application.expose()
print('Expose all opened port ranges to the CIDRs that correspond to a list of spaces')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"])
})
print('Expose all opened port ranges to a list of CIDRs')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_cidrs=["10.0.0.0/24"])
})
print('Expose all opened port ranges to a list of spaces and CIDRs')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"], to_cidrs=["10.0.0.0/24"])
})
print('Expose individual endpoints to different space/CIDR combinations')
await application.expose(exposed_endpoints={
"": ExposedEndpoint(to_spaces=["alpha"], to_cidrs=["10.0.0.0/24"]),
"ubuntu": ExposedEndpoint(to_cidrs=["10.42.42.0/24"])
})
print('Unexpose individual endpoints (other endpoints remain exposed)')
await application.unexpose(exposed_endpoints=["ubuntu"])
print('Unexpose application')
await application.unexpose()
print('Removing ubuntu')
await application.remove()
finally:
print('Disconnecting from model')
await model.disconnect()
if __name__ == '__main__':
loop.run(main())
|
|
abd7c37bee88841ccddf057a430f72f0313eb19c
|
maediprojects/query/finances.py
|
maediprojects/query/finances.py
|
from maediprojects import db, models
from sqlalchemy import *
import datetime
def isostring_date(value):
# Returns a date object from a string of format YYYY-MM-DD
return datetime.datetime.strptime(value, "%Y-%m-%d")
def isostring_year(value):
# Returns a date object from a string of format YYYY
return datetime.datetime.strptime(value, "%Y")
def add_finances(activity_id, data):
aF = models.ActivityFinances()
aF.activity_id = activity_id
#aF.transaction_date = data["transaction_date"]
aF.transaction_type = data["transaction_type"]
#aF.transaction_value = data["transaction_value"]
#aF.transaction_description = data["transaction_description"]
db.session.add(aF)
db.session.commit()
return aF.id
def delete_finances(activity_id, finances_id):
checkF = models.ActivityFinances.query.filter_by(
activity_id = activity_id,
id = finances_id
).first()
if checkF:
db.session.delete(checkF)
db.session.commit()
return True
return False
def update_attr(data):
finance = models.ActivityFinances.query.filter_by(
id = data['finances_id']
).first()
if data['attr'].endswith('date'):
if data["value"] == "":
data["value"] = None
else:
data['value'] = isostring_date(data['value'])
setattr(finance, data['attr'], data['value'])
db.session.add(finance)
db.session.commit()
return True
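As a reading aid (inferred from the functions above, not from the repository's callers), update_attr expects a flat dict naming the row, the column and the raw value; columns whose name ends in 'date' arrive as ISO strings and go through isostring_date:
data = {
    'finances_id': 1,               # ActivityFinances primary key (example value)
    'attr': 'transaction_date',     # column to update; name ends in 'date'
    'value': '2016-05-01',          # parsed to datetime(2016, 5, 1) by isostring_date
}
# update_attr(data) then sets the attribute on the row and commits the session.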
|
Add ability to create and update financial data
|
Add ability to create and update financial data
|
Python
|
agpl-3.0
|
markbrough/maedi-projects,markbrough/maedi-projects,markbrough/maedi-projects
|
Add ability to create and update financial data
|
from maediprojects import db, models
from sqlalchemy import *
import datetime
def isostring_date(value):
# Returns a date object from a string of format YYYY-MM-DD
return datetime.datetime.strptime(value, "%Y-%m-%d")
def isostring_year(value):
# Returns a date object from a string of format YYYY
return datetime.datetime.strptime(value, "%Y")
def add_finances(activity_id, data):
aF = models.ActivityFinances()
aF.activity_id = activity_id
#aF.transaction_date = data["transaction_date"]
aF.transaction_type = data["transaction_type"]
#aF.transaction_value = data["transaction_value"]
#aF.transaction_description = data["transaction_description"]
db.session.add(aF)
db.session.commit()
return aF.id
def delete_finances(activity_id, finances_id):
checkF = models.ActivityFinances.query.filter_by(
activity_id = activity_id,
id = finances_id
).first()
if checkF:
db.session.delete(checkF)
db.session.commit()
return True
return False
def update_attr(data):
finance = models.ActivityFinances.query.filter_by(
id = data['finances_id']
).first()
if data['attr'].endswith('date'):
if data["value"] == "":
data["value"] = None
else:
data['value'] = isostring_date(data['value'])
setattr(finance, data['attr'], data['value'])
db.session.add(finance)
db.session.commit()
return True
|
<commit_before><commit_msg>Add ability to create and update financial data<commit_after>
|
from maediprojects import db, models
from sqlalchemy import *
import datetime
def isostring_date(value):
# Returns a date object from a string of format YYYY-MM-DD
return datetime.datetime.strptime(value, "%Y-%m-%d")
def isostring_year(value):
# Returns a date object from a string of format YYYY
return datetime.datetime.strptime(value, "%Y")
def add_finances(activity_id, data):
aF = models.ActivityFinances()
aF.activity_id = activity_id
#aF.transaction_date = data["transaction_date"]
aF.transaction_type = data["transaction_type"]
#aF.transaction_value = data["transaction_value"]
#aF.transaction_description = data["transaction_description"]
db.session.add(aF)
db.session.commit()
return aF.id
def delete_finances(activity_id, finances_id):
checkF = models.ActivityFinances.query.filter_by(
activity_id = activity_id,
id = finances_id
).first()
if checkF:
db.session.delete(checkF)
db.session.commit()
return True
return False
def update_attr(data):
finance = models.ActivityFinances.query.filter_by(
id = data['finances_id']
).first()
if data['attr'].endswith('date'):
if data["value"] == "":
data["value"] = None
else:
data['value'] = isostring_date(data['value'])
setattr(finance, data['attr'], data['value'])
db.session.add(finance)
db.session.commit()
return True
|
Add ability to create and update financial datafrom maediprojects import db, models
from sqlalchemy import *
import datetime
def isostring_date(value):
# Returns a date object from a string of format YYYY-MM-DD
return datetime.datetime.strptime(value, "%Y-%m-%d")
def isostring_year(value):
# Returns a date object from a string of format YYYY
return datetime.datetime.strptime(value, "%Y")
def add_finances(activity_id, data):
aF = models.ActivityFinances()
aF.activity_id = activity_id
#aF.transaction_date = data["transaction_date"]
aF.transaction_type = data["transaction_type"]
#aF.transaction_value = data["transaction_value"]
#aF.transaction_description = data["transaction_description"]
db.session.add(aF)
db.session.commit()
return aF.id
def delete_finances(activity_id, finances_id):
checkF = models.ActivityFinances.query.filter_by(
activity_id = activity_id,
id = finances_id
).first()
if checkF:
db.session.delete(checkF)
db.session.commit()
return True
return False
def update_attr(data):
finance = models.ActivityFinances.query.filter_by(
id = data['finances_id']
).first()
if data['attr'].endswith('date'):
if data["value"] == "":
data["value"] = None
else:
data['value'] = isostring_date(data['value'])
setattr(finance, data['attr'], data['value'])
db.session.add(finance)
db.session.commit()
return True
|
<commit_before><commit_msg>Add ability to create and update financial data<commit_after>from maediprojects import db, models
from sqlalchemy import *
import datetime
def isostring_date(value):
# Returns a date object from a string of format YYYY-MM-DD
return datetime.datetime.strptime(value, "%Y-%m-%d")
def isostring_year(value):
# Returns a date object from a string of format YYYY
return datetime.datetime.strptime(value, "%Y")
def add_finances(activity_id, data):
aF = models.ActivityFinances()
aF.activity_id = activity_id
#aF.transaction_date = data["transaction_date"]
aF.transaction_type = data["transaction_type"]
#aF.transaction_value = data["transaction_value"]
#aF.transaction_description = data["transaction_description"]
db.session.add(aF)
db.session.commit()
return aF.id
def delete_finances(activity_id, finances_id):
checkF = models.ActivityFinances.query.filter_by(
activity_id = activity_id,
id = finances_id
).first()
if checkF:
db.session.delete(checkF)
db.session.commit()
return True
return False
def update_attr(data):
finance = models.ActivityFinances.query.filter_by(
id = data['finances_id']
).first()
if data['attr'].endswith('date'):
if data["value"] == "":
data["value"] = None
else:
data['value'] = isostring_date(data['value'])
setattr(finance, data['attr'], data['value'])
db.session.add(finance)
db.session.commit()
return True
|
|
c603dc219d47ef255ef30447526e9c8dff82a5db
|
blues/python.py
|
blues/python.py
|
"""
Python Blueprint
================
Does not install python itself, only develop and setup tools.
Contains pip helper for other blueprints to use.
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.python
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from . import debian
__all__ = ['setup']
@task
def setup():
"""
Install python develop tools
"""
install()
def install():
with sudo():
info('Install python dependencies')
debian.apt_get('install', 'python-dev', 'python-setuptools')
run('easy_install -0 pip')
def pip(command, *options):
info('Running pip {}', command)
run('pip {} {} -v --log=/tmp/pip.log --log-file=/tmp/pip.log'.format(command, ' '.join(options)))
|
"""
Python Blueprint
================
Does not install python itself, only develop and setup tools.
Contains pip helper for other blueprints to use.
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.python
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from . import debian
__all__ = ['setup']
pip_log_file = '/tmp/pip.log'
@task
def setup():
"""
Install python develop tools
"""
install()
def install():
with sudo():
info('Install python dependencies')
debian.apt_get('install', 'python-dev', 'python-setuptools')
run('easy_install -0 pip')
run('touch {}'.format(pip_log_file))
debian.chmod(pip_log_file, mode=777)
def pip(command, *options):
info('Running pip {}', command)
run('pip {0} {1} -v --log={2} --log-file={2}'.format(command, ' '.join(options), pip_log_file))
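A side note on the design choice (my reading of it, not stated in the commit message): touching /tmp/pip.log and making it world-writable up front lets later pip runs, possibly executed as a different user than the one provisioning the host, append to the same log instead of failing on permissions. A rough standard-library equivalent of that touch-and-chmod step, for illustration only:
import os
import stat

log_path = '/tmp/pip.log'
open(log_path, 'a').close()  # touch: create the file if it does not already exist
os.chmod(log_path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # i.e. chmod 777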
|
Make pip log world writable
|
Make pip log world writable
|
Python
|
mit
|
adisbladis/blues,jocke-l/blues,gelbander/blues,jocke-l/blues,Sportamore/blues,gelbander/blues,chrippa/blues,andreif/blues,gelbander/blues,5monkeys/blues,Sportamore/blues,chrippa/blues,adisbladis/blues,andreif/blues,adisbladis/blues,5monkeys/blues,jocke-l/blues,Sportamore/blues,andreif/blues,5monkeys/blues,chrippa/blues
|
"""
Python Blueprint
================
Does not install python itself, only develop and setup tools.
Contains pip helper for other blueprints to use.
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.python
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from . import debian
__all__ = ['setup']
@task
def setup():
"""
Install python develop tools
"""
install()
def install():
with sudo():
info('Install python dependencies')
debian.apt_get('install', 'python-dev', 'python-setuptools')
run('easy_install -0 pip')
def pip(command, *options):
info('Running pip {}', command)
run('pip {} {} -v --log=/tmp/pip.log --log-file=/tmp/pip.log'.format(command, ' '.join(options)))
Make pip log world writable
|
"""
Python Blueprint
================
Does not install python itself, only develop and setup tools.
Contains pip helper for other blueprints to use.
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.python
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from . import debian
__all__ = ['setup']
pip_log_file = '/tmp/pip.log'
@task
def setup():
"""
Install python develop tools
"""
install()
def install():
with sudo():
info('Install python dependencies')
debian.apt_get('install', 'python-dev', 'python-setuptools')
run('easy_install -0 pip')
run('touch {}'.format(pip_log_file))
debian.chmod(pip_log_file, mode=777)
def pip(command, *options):
info('Running pip {}', command)
run('pip {0} {1} -v --log={2} --log-file={2}'.format(command, ' '.join(options), pip_log_file))
|
<commit_before>"""
Python Blueprint
================
Does not install python itself, only develop and setup tools.
Contains pip helper for other blueprints to use.
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.python
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from . import debian
__all__ = ['setup']
@task
def setup():
"""
Install python develop tools
"""
install()
def install():
with sudo():
info('Install python dependencies')
debian.apt_get('install', 'python-dev', 'python-setuptools')
run('easy_install -0 pip')
def pip(command, *options):
info('Running pip {}', command)
run('pip {} {} -v --log=/tmp/pip.log --log-file=/tmp/pip.log'.format(command, ' '.join(options)))
<commit_msg>Make pip log world writable<commit_after>
|
"""
Python Blueprint
================
Does not install python itself, only develop and setup tools.
Contains pip helper for other blueprints to use.
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.python
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from . import debian
__all__ = ['setup']
pip_log_file = '/tmp/pip.log'
@task
def setup():
"""
Install python develop tools
"""
install()
def install():
with sudo():
info('Install python dependencies')
debian.apt_get('install', 'python-dev', 'python-setuptools')
run('easy_install -0 pip')
run('touch {}'.format(pip_log_file))
debian.chmod(pip_log_file, mode=777)
def pip(command, *options):
info('Running pip {}', command)
run('pip {0} {1} -v --log={2} --log-file={2}'.format(command, ' '.join(options), pip_log_file))
|
"""
Python Blueprint
================
Does not install python itself, only develop and setup tools.
Contains pip helper for other blueprints to use.
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.python
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from . import debian
__all__ = ['setup']
@task
def setup():
"""
Install python develop tools
"""
install()
def install():
with sudo():
info('Install python dependencies')
debian.apt_get('install', 'python-dev', 'python-setuptools')
run('easy_install -0 pip')
def pip(command, *options):
info('Running pip {}', command)
run('pip {} {} -v --log=/tmp/pip.log --log-file=/tmp/pip.log'.format(command, ' '.join(options)))
Make pip log world writable"""
Python Blueprint
================
Does not install python itself, only develop and setup tools.
Contains pip helper for other blueprints to use.
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.python
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from . import debian
__all__ = ['setup']
pip_log_file = '/tmp/pip.log'
@task
def setup():
"""
Install python develop tools
"""
install()
def install():
with sudo():
info('Install python dependencies')
debian.apt_get('install', 'python-dev', 'python-setuptools')
run('easy_install -0 pip')
run('touch {}'.format(pip_log_file))
debian.chmod(pip_log_file, mode=777)
def pip(command, *options):
info('Running pip {}', command)
run('pip {0} {1} -v --log={2} --log-file={2}'.format(command, ' '.join(options), pip_log_file))
|
<commit_before>"""
Python Blueprint
================
Does not install python itself, only develop and setup tools.
Contains pip helper for other blueprints to use.
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.python
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from . import debian
__all__ = ['setup']
@task
def setup():
"""
Install python develop tools
"""
install()
def install():
with sudo():
info('Install python dependencies')
debian.apt_get('install', 'python-dev', 'python-setuptools')
run('easy_install -0 pip')
def pip(command, *options):
info('Running pip {}', command)
run('pip {} {} -v --log=/tmp/pip.log --log-file=/tmp/pip.log'.format(command, ' '.join(options)))
<commit_msg>Make pip log world writable<commit_after>"""
Python Blueprint
================
Does not install python itself, only develop and setup tools.
Contains pip helper for other blueprints to use.
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.python
"""
from fabric.decorators import task
from refabric.api import run, info
from refabric.context_managers import sudo
from . import debian
__all__ = ['setup']
pip_log_file = '/tmp/pip.log'
@task
def setup():
"""
Install python develop tools
"""
install()
def install():
with sudo():
info('Install python dependencies')
debian.apt_get('install', 'python-dev', 'python-setuptools')
run('easy_install -0 pip')
run('touch {}'.format(pip_log_file))
debian.chmod(pip_log_file, mode=777)
def pip(command, *options):
info('Running pip {}', command)
run('pip {0} {1} -v --log={2} --log-file={2}'.format(command, ' '.join(options), pip_log_file))
|
67f4631faecf672ecc472adb95cabeec53e950eb
|
git.py
|
git.py
|
from subprocess import check_output
def get_sha():
"""Determines Git SHA of current working directory."""
return check_output(["git", "log","-n1","--pretty=oneline"]).split(' ')[0]
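For comparison, a hedged alternative (not part of this repository): git can print the bare commit id directly, which avoids parsing the --pretty=oneline format; note that on Python 3 check_output returns bytes, so the result is decoded here.
from subprocess import check_output

def get_sha_rev_parse():
    # `git rev-parse HEAD` prints only the SHA of the current commit
    return check_output(['git', 'rev-parse', 'HEAD']).decode().strip()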
|
Add Git SHA read util
|
Add Git SHA read util
|
Python
|
bsd-2-clause
|
Multifarious/fabulous
|
Add Git SHA read util
|
from subprocess import check_output
def get_sha():
"""Determines Git SHA of current working directory."""
return check_output(["git", "log","-n1","--pretty=oneline"]).split(' ')[0]
|
<commit_before><commit_msg>Add Git SHA read util<commit_after>
|
from subprocess import check_output
def get_sha():
"""Determines Git SHA of current working directory."""
return check_output(["git", "log","-n1","--pretty=oneline"]).split(' ')[0]
|
Add Git SHA read utilfrom subprocess import check_output
def get_sha():
"""Determines Git SHA of current working directory."""
return check_output(["git", "log","-n1","--pretty=oneline"]).split(' ')[0]
|
<commit_before><commit_msg>Add Git SHA read util<commit_after>from subprocess import check_output
def get_sha():
"""Determines Git SHA of current working directory."""
return check_output(["git", "log","-n1","--pretty=oneline"]).split(' ')[0]
|
|
3228bf3dd1a32694b42f4d08a5c6f0e63bf5128a
|
all_reports_smell_search_final.py
|
all_reports_smell_search_final.py
|
from map import mapping
# walk through the os and get all files
# read each file in turn and go through line by line
# print lines that contain smell and the report name
from os import listdir
import nltk.data
import json
SMELL_WORDS = ['smell', 'stench', 'stink', 'odour', 'sniff', 'effluvium']
REPORTS_DIR = '/Users/deborah/Documents/scripts/python_work/project2016/Full Text Online'
global finalResult
finalResult = {}
def addToDic(d, report, rDate, val):
d.setdefault(report, []).append(val)
return d
def getFileNames():
'''Retrieve file names'''
fileNames = [f for f in listdir(REPORTS_DIR) if f.endswith('txt')]
return fileNames
def processFile(fileName):
path = REPORTS_DIR + '/' + fileName
references = []
with open(path) as f:
for line in f:
report_tokenized = tokenize(line)
for scentence in report_tokenized:
for word in SMELL_WORDS:
if word in scentence.lower():
references.append(scentence)
return references
def tokenize(sentence):
parser = nltk.data.load('tokenizers/punkt/english.pickle')
result = parser.tokenize(sentence.strip())
return result
def saveObject(results):
'''Save results dictionary as file'''
with open('processed_results.txt', 'w') as outfile:
json.dump(results, outfile)
def performAnalysis(fileName, references):
'''Create the results output'''
# splits a fileName into :['Acton', '1900', 'b19783358', 'txt']
splitReport = fileName.split('.')
bID = splitReport[2]
year = splitReport[1]
try:
region = mapping[bID]
except:
return
# print bID
if region in finalResult:
nestedDic = finalResult[region]
else:
nestedDic = {}
nestedDic[year] = references
finalResult[region] = nestedDic
# if nestedDic[splitReport[1]]:
# val = nestedDic[splitReport[1]]
# nestedDic[splitReport[1]] = len(references) + val
# else:
# if len(references):
# nestedDic[splitReport[1]] = len(references)
# # nestedDic.setDefault(splitReport[1], 0).__add__(len(references))
# result[region] = nestedDic
# print(result)
# for k,v in result.iteritems():
def main():
# tokenize(s)
fileNames = getFileNames()
# f1 = fileNames[0]
# processFile(f1)
fileNames = fileNames[:100]
for f in fileNames:
references = processFile(f)
if references:
performAnalysis(f, references)
saveObject(finalResult)
if __name__ == '__main__':
main()
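A small usage sketch of the sentence-splitting step (assumes the NLTK punkt model has been fetched once with nltk.download('punkt'); the sample text is made up):
import nltk.data

parser = nltk.data.load('tokenizers/punkt/english.pickle')
print(parser.tokenize('There was a foul smell near the works. The drains were blamed.'))
# -> ['There was a foul smell near the works.', 'The drains were blamed.']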
|
Add script to datamine the reports via NLTK
|
Add script to datamine the reports via NLTK
|
Python
|
apache-2.0
|
Smelly-London/Smelly-London,Smelly-London/Smelly-London,Smelly-London/datavisualisation,Smelly-London/Smelly-London,Smelly-London/datavisualisation,Smelly-London/Smelly-London
|
Add script to datamine the reports via NLTK
|
from map import mapping
# walk through the os and get all files
# read each file in turn and go through line by line
# print lines that contain smell and the report name
from os import listdir
import nltk.data
import json
SMELL_WORDS = ['smell', 'stench', 'stink', 'odour', 'sniff', 'effluvium']
REPORTS_DIR = '/Users/deborah/Documents/scripts/python_work/project2016/Full Text Online'
global finalResult
finalResult = {}
def addToDic(d, report, rDate, val):
d.setdefault(report, []).append(val)
return d
def getFileNames():
'''Retrieve file names'''
fileNames = [f for f in listdir(REPORTS_DIR) if f.endswith('txt')]
return fileNames
def processFile(fileName):
path = REPORTS_DIR + '/' + fileName
references = []
with open(path) as f:
for line in f:
report_tokenized = tokenize(line)
for scentence in report_tokenized:
for word in SMELL_WORDS:
if word in scentence.lower():
references.append(scentence)
return references
def tokenize(sentence):
parser = nltk.data.load('tokenizers/punkt/english.pickle')
result = parser.tokenize(sentence.strip())
return result
def saveObject(results):
'''Save results dictionary as file'''
with open('processed_results.txt', 'w') as outfile:
json.dump(results, outfile)
def performAnalysis(fileName, references):
'''Create the results output'''
# splits a fileName into :['Acton', '1900', 'b19783358', 'txt']
splitReport = fileName.split('.')
bID = splitReport[2]
year = splitReport[1]
try:
region = mapping[bID]
except:
return
# print bID
if region in finalResult:
nestedDic = finalResult[region]
else:
nestedDic = {}
nestedDic[year] = references
finalResult[region] = nestedDic
# if nestedDic[splitReport[1]]:
# val = nestedDic[splitReport[1]]
# nestedDic[splitReport[1]] = len(references) + val
# else:
# if len(references):
# nestedDic[splitReport[1]] = len(references)
# # nestedDic.setDefault(splitReport[1], 0).__add__(len(references))
# result[region] = nestedDic
# print(result)
# for k,v in result.iteritems():
def main():
# tokenize(s)
fileNames = getFileNames()
# f1 = fileNames[0]
# processFile(f1)
fileNames = fileNames[:100]
for f in fileNames:
references = processFile(f)
if references:
performAnalysis(f, references)
saveObject(finalResult)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to datamine the reports via NLTK<commit_after>
|
from map import mapping
# walk through the os and get all files
# read each file in turn and go through line by line
# print lines that contain smell and the report name
from os import listdir
import nltk.data
import json
SMELL_WORDS = ['smell', 'stench', 'stink', 'odour', 'sniff', 'effluvium']
REPORTS_DIR = '/Users/deborah/Documents/scripts/python_work/project2016/Full Text Online'
global finalResult
finalResult = {}
def addToDic(d, report, rDate, val):
d.setdefault(report, []).append(val)
return d
def getFileNames():
'''Retrieve file names'''
fileNames = [f for f in listdir(REPORTS_DIR) if f.endswith('txt')]
return fileNames
def processFile(fileName):
path = REPORTS_DIR + '/' + fileName
references = []
with open(path) as f:
for line in f:
report_tokenized = tokenize(line)
for scentence in report_tokenized:
for word in SMELL_WORDS:
if word in scentence.lower():
references.append(scentence)
return references
def tokenize(sentence):
parser = nltk.data.load('tokenizers/punkt/english.pickle')
result = parser.tokenize(sentence.strip())
return result
def saveObject(results):
'''Save results dictionary as file'''
with open('processed_results.txt', 'w') as outfile:
json.dump(results, outfile)
def performAnalysis(fileName, references):
'''Create the results output'''
# splits a fileName into :['Acton', '1900', 'b19783358', 'txt']
splitReport = fileName.split('.')
bID = splitReport[2]
year = splitReport[1]
try:
region = mapping[bID]
except:
return
# print bID
if region in finalResult:
nestedDic = finalResult[region]
else:
nestedDic = {}
nestedDic[year] = references
finalResult[region] = nestedDic
# if nestedDic[splitReport[1]]:
# val = nestedDic[splitReport[1]]
# nestedDic[splitReport[1]] = len(references) + val
# else:
# if len(references):
# nestedDic[splitReport[1]] = len(references)
# # nestedDic.setDefault(splitReport[1], 0).__add__(len(references))
# result[region] = nestedDic
# print(result)
# for k,v in result.iteritems():
def main():
# tokenize(s)
fileNames = getFileNames()
# f1 = fileNames[0]
# processFile(f1)
fileNames = fileNames[:100]
for f in fileNames:
references = processFile(f)
if references:
performAnalysis(f, references)
saveObject(finalResult)
if __name__ == '__main__':
main()
|
Add script to datamine the reports via NLTK
from map import mapping
# walk through the os and get all files
# read each file in turn and go through line by line
# print lines that contain smell and the report name
from os import listdir
import nltk.data
import json
SMELL_WORDS = ['smell', 'stench', 'stink', 'odour', 'sniff', 'effluvium']
REPORTS_DIR = '/Users/deborah/Documents/scripts/python_work/project2016/Full Text Online'
global finalResult
finalResult = {}
def addToDic(d, report, rDate, val):
d.setdefault(report, []).append(val)
return d
def getFileNames():
'''Retrieve file names'''
fileNames = [f for f in listdir(REPORTS_DIR) if f.endswith('txt')]
return fileNames
def processFile(fileName):
path = REPORTS_DIR + '/' + fileName
references = []
with open(path) as f:
for line in f:
report_tokenized = tokenize(line)
for scentence in report_tokenized:
for word in SMELL_WORDS:
if word in scentence.lower():
references.append(scentence)
return references
def tokenize(sentence):
parser = nltk.data.load('tokenizers/punkt/english.pickle')
result = parser.tokenize(sentence.strip())
return result
def saveObject(results):
'''Save results dictionary as file'''
with open('processed_results.txt', 'w') as outfile:
json.dump(results, outfile)
def performAnalysis(fileName, references):
'''Create the results output'''
# splits a fileName into :['Acton', '1900', 'b19783358', 'txt']
splitReport = fileName.split('.')
bID = splitReport[2]
year = splitReport[1]
try:
region = mapping[bID]
except:
return
# print bID
if region in finalResult:
nestedDic = finalResult[region]
else:
nestedDic = {}
nestedDic[year] = references
finalResult[region] = nestedDic
# if nestedDic[splitReport[1]]:
# val = nestedDic[splitReport[1]]
# nestedDic[splitReport[1]] = len(references) + val
# else:
# if len(references):
# nestedDic[splitReport[1]] = len(references)
# # nestedDic.setDefault(splitReport[1], 0).__add__(len(references))
# result[region] = nestedDic
# print(result)
# for k,v in result.iteritems():
def main():
# tokenize(s)
fileNames = getFileNames()
# f1 = fileNames[0]
# processFile(f1)
fileNames = fileNames[:100]
for f in fileNames:
references = processFile(f)
if references:
performAnalysis(f, references)
saveObject(finalResult)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to datamine the reports via NLTK<commit_after>
from map import mapping
# walk through the os and get all files
# read each file in turn and go through line by line
# print lines that contain smell and the report name
from os import listdir
import nltk.data
import json
SMELL_WORDS = ['smell', 'stench', 'stink', 'odour', 'sniff', 'effluvium']
REPORTS_DIR = '/Users/deborah/Documents/scripts/python_work/project2016/Full Text Online'
global finalResult
finalResult = {}
def addToDic(d, report, rDate, val):
d.setdefault(report, []).append(val)
return d
def getFileNames():
'''Retrieve file names'''
fileNames = [f for f in listdir(REPORTS_DIR) if f.endswith('txt')]
return fileNames
def processFile(fileName):
path = REPORTS_DIR + '/' + fileName
references = []
with open(path) as f:
for line in f:
report_tokenized = tokenize(line)
for scentence in report_tokenized:
for word in SMELL_WORDS:
if word in scentence.lower():
references.append(scentence)
return references
def tokenize(sentence):
parser = nltk.data.load('tokenizers/punkt/english.pickle')
result = parser.tokenize(sentence.strip())
return result
def saveObject(results):
'''Save results dictionary as file'''
with open('processed_results.txt', 'w') as outfile:
json.dump(results, outfile)
def performAnalysis(fileName, references):
'''Create the results output'''
# splits a fileName into :['Acton', '1900', 'b19783358', 'txt']
splitReport = fileName.split('.')
bID = splitReport[2]
year = splitReport[1]
try:
region = mapping[bID]
except:
return
# print bID
if region in finalResult:
nestedDic = finalResult[region]
else:
nestedDic = {}
nestedDic[year] = references
finalResult[region] = nestedDic
# if nestedDic[splitReport[1]]:
# val = nestedDic[splitReport[1]]
# nestedDic[splitReport[1]] = len(references) + val
# else:
# if len(references):
# nestedDic[splitReport[1]] = len(references)
# # nestedDic.setDefault(splitReport[1], 0).__add__(len(references))
# result[region] = nestedDic
# print(result)
# for k,v in result.iteritems():
def main():
# tokenize(s)
fileNames = getFileNames()
# f1 = fileNames[0]
# processFile(f1)
fileNames = fileNames[:100]
for f in fileNames:
references = processFile(f)
if references:
performAnalysis(f, references)
saveObject(finalResult)
if __name__ == '__main__':
main()
|
|
21c5e1f52b4f50b146e480f68d10da73cf5306d3
|
backend/scripts/addusertoproj.py
|
backend/scripts/addusertoproj.py
|
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import sys
class Access(object):
def __init__(self, user_id, project_id, project_name):
self.user_id = user_id
self.project_id = project_id
self.project_name = project_name
self.dataset = ""
self.permissions = ""
self.status = ""
self.birthtime = r.now()
self.mtime = self.birthtime
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-u", "--user", dest="user",
help="user id", type="string")
parser.add_option("-p", "--project", dest="project", help="project id", type="string")
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
if options.user is None:
print "You must specify a user id"
sys.exit(1)
if options.project is None:
print "You must specify a project"
sys.exit(1)
proj = r.table('projects').get(options.project).run(conn)
if proj is None:
print "No such project"
sys.exit(1)
a = Access(options.user, options.project, proj['name'])
r.table('access').insert(a.__dict__).run(conn)
print "User added to project"
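Going by the optparse flags defined above, a typical invocation (identifiers are placeholders) would be: python addusertoproj.py -P 30815 -u <user-id> -p <project-id>.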
|
Add script to add a user to a project
|
Add script to add a user to a project
|
Python
|
mit
|
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
|
Add script to add a user to a project
|
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import sys
class Access(object):
def __init__(self, user_id, project_id, project_name):
self.user_id = user_id
self.project_id = project_id
self.project_name = project_name
self.dataset = ""
self.permissions = ""
self.status = ""
self.birthtime = r.now()
self.mtime = self.birthtime
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-u", "--user", dest="user",
help="user id", type="string")
parser.add_option("-p", "--project", dest="project", help="project id", type="string")
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
if options.user is None:
print "You must specify a user id"
sys.exit(1)
if options.project is None:
print "You must specify a project"
sys.exit(1)
proj = r.table('projects').get(options.project).run(conn)
if proj is None:
print "No such project"
sys.exit(1)
a = Access(options.user, options.project, proj['name'])
r.table('access').insert(a.__dict__).run(conn)
print "User added to project"
|
<commit_before><commit_msg>Add script to add a user to a project<commit_after>
|
#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import sys
class Access(object):
def __init__(self, user_id, project_id, project_name):
self.user_id = user_id
self.project_id = project_id
self.project_name = project_name
self.dataset = ""
self.permissions = ""
self.status = ""
self.birthtime = r.now()
self.mtime = self.birthtime
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-u", "--user", dest="user",
help="user id", type="string")
parser.add_option("-p", "--project", dest="project", help="project id", type="string")
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
if options.user is None:
print "You must specify a user id"
sys.exit(1)
if options.project is None:
print "You must specify a project"
sys.exit(1)
proj = r.table('projects').get(options.project).run(conn)
if proj is None:
print "No such project"
sys.exit(1)
a = Access(options.user, options.project, proj['name'])
r.table('access').insert(a.__dict__).run(conn)
print "User added to project"
|
Add script to add a user to a project#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import sys
class Access(object):
def __init__(self, user_id, project_id, project_name):
self.user_id = user_id
self.project_id = project_id
self.project_name = project_name
self.dataset = ""
self.permissions = ""
self.status = ""
self.birthtime = r.now()
self.mtime = self.birthtime
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-u", "--user", dest="user",
help="user id", type="string")
parser.add_option("-p", "--project", dest="project", help="project id", type="string")
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
if options.user is None:
print "You must specify a user id"
sys.exit(1)
if options.project is None:
print "You must specify a project"
sys.exit(1)
proj = r.table('projects').get(options.project).run(conn)
if proj is None:
print "No such project"
sys.exit(1)
a = Access(options.user, options.project, proj['name'])
r.table('access').insert(a.__dict__).run(conn)
print "User added to project"
|
<commit_before><commit_msg>Add script to add a user to a project<commit_after>#!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import sys
class Access(object):
def __init__(self, user_id, project_id, project_name):
self.user_id = user_id
self.project_id = project_id
self.project_name = project_name
self.dataset = ""
self.permissions = ""
self.status = ""
self.birthtime = r.now()
self.mtime = self.birthtime
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-P", "--port", dest="port", type="int",
help="rethinkdb port", default=30815)
parser.add_option("-u", "--user", dest="user",
help="user id", type="string")
parser.add_option("-p", "--project", dest="project", help="project id", type="string")
(options, args) = parser.parse_args()
conn = r.connect('localhost', options.port, db='materialscommons')
if options.user is None:
print "You must specify a user id"
sys.exit(1)
if options.project is None:
print "You must specify a project"
sys.exit(1)
proj = r.table('projects').get(options.project).run(conn)
if proj is None:
print "No such project"
sys.exit(1)
a = Access(options.user, options.project, proj['name'])
r.table('access').insert(a.__dict__).run(conn)
print "User added to project"
|
|
a2120c11eb50553ea74fd615a446e0bd2db07ea0
|
tests/python/email_processor.py
|
tests/python/email_processor.py
|
import email
import imaplib
import os
import sys
class EmailProcessor():
mail = None
def __init__(self, user, password, host='mail.mega.co.nz', port=993):
self.mail = imaplib.IMAP4_SSL(host, port)
self.mail.login(user, password)
self.mail.select('Inbox')
def get_validation_link_from_email(self, to, intent, delete=False):
"""Get validation link from email."""
link = None
messages = self.get_message_from_email(to)
if not messages:
return None
for message in messages:
sub = list(message)[0]
text = message[sub]
if not text[1]:
continue
for line in text[1].splitlines():
if line.startswith('https://') and ('#' + intent) in line:
link = line.strip()
break
if link: # already found it, quit
#if delete:
# This should move the email to Trash! But it doesn't work
#self.mail.store(text[0], '+X-GM-LABELS', '\\Trash')
break
self.mail.close()
self.mail.logout()
return link
def get_message_from_email(self, to):
"""Get message from email."""
# sort all emails sent to the right address, newest first
result, data = self.mail.sort('REVERSE DATE', 'UTF-8', '(To "{}")'.format(to))
if len(data[0]) == 0:
return None
# get all potential emails
messages = []
for n in data[0].split():
_, d = self.mail.fetch(n, "(RFC822)")
body = d[0][1]
msg = email.message_from_string(body.decode('utf-8'))
subject = msg['subject']
text = None
for m in msg.walk():
content_type = m.get_content_type()
if content_type == 'text/plain':
text = m.get_payload(decode=True).decode('raw-unicode-escape')
break
if text:
messages.append({subject: [n, text]})
return messages
user = os.getenv('TEST_USER') or sys.argv[1]
password = os.getenv('TEST_PASS') or sys.argv[2]
to = sys.argv[3]
intent = ""
if sys.argv[4] == "confirm":
intent = "confirm"
elif sys.argv[4] == "delete":
intent = "cancel"
ep = EmailProcessor(user, password)
link = ep.get_validation_link_from_email(to, intent, delete=True)
if link:
print(link, end = '')
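Per the argument handling above, the script is invoked as: python3 email_processor.py <imap-user> <imap-password> <recipient-address> confirm (or delete); the user and password can instead be supplied through the TEST_USER and TEST_PASS environment variables. All values shown here are placeholders.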
|
Add Python script used to retrieve confirmation links from emails
|
Add Python script used to retrieve confirmation links from emails
|
Python
|
bsd-2-clause
|
meganz/sdk,meganz/sdk,meganz/sdk,meganz/sdk,meganz/sdk,meganz/sdk,meganz/sdk
|
Add Python script used to retrieve confirmation links from emails
|
import email
import imaplib
import os
import sys
class EmailProcessor():
mail = None
def __init__(self, user, password, host='mail.mega.co.nz', port=993):
self.mail = imaplib.IMAP4_SSL(host, port)
self.mail.login(user, password)
self.mail.select('Inbox')
def get_validation_link_from_email(self, to, intent, delete=False):
"""Get validation link from email."""
link = None
messages = self.get_message_from_email(to)
if not messages:
return None
for message in messages:
sub = list(message)[0]
text = message[sub]
if not text[1]:
continue
for line in text[1].splitlines():
if line.startswith('https://') and ('#' + intent) in line:
link = line.strip()
break
if link: # already found it, quit
#if delete:
# This should move the email to Trash! But it doesn't work
#self.mail.store(text[0], '+X-GM-LABELS', '\\Trash')
break
self.mail.close()
self.mail.logout()
return link
def get_message_from_email(self, to):
"""Get message from email."""
# sort all emails sent to the right address, newest first
result, data = self.mail.sort('REVERSE DATE', 'UTF-8', '(To "{}")'.format(to))
if len(data[0]) == 0:
return None
# get all potential emails
messages = []
for n in data[0].split():
_, d = self.mail.fetch(n, "(RFC822)")
body = d[0][1]
msg = email.message_from_string(body.decode('utf-8'))
subject = msg['subject']
text = None
for m in msg.walk():
content_type = m.get_content_type()
if content_type == 'text/plain':
text = m.get_payload(decode=True).decode('raw-unicode-escape')
break
if text:
messages.append({subject: [n, text]})
return messages
user = os.getenv('TEST_USER') or sys.argv[1]
password = os.getenv('TEST_PASS') or sys.argv[2]
to = sys.argv[3]
intent = ""
if sys.argv[4] == "confirm":
intent = "confirm"
elif sys.argv[4] == "delete":
intent = "cancel"
ep = EmailProcessor(user, password)
link = ep.get_validation_link_from_email(to, intent, delete=True)
if link:
print(link, end = '')
|
<commit_before><commit_msg>Add Python script used to retrieve confirmation links from emails<commit_after>
|
import email
import imaplib
import os
import sys
class EmailProcessor():
mail = None
def __init__(self, user, password, host='mail.mega.co.nz', port=993):
self.mail = imaplib.IMAP4_SSL(host, port)
self.mail.login(user, password)
self.mail.select('Inbox')
def get_validation_link_from_email(self, to, intent, delete=False):
"""Get validation link from email."""
link = None
messages = self.get_message_from_email(to)
if not messages:
return None
for message in messages:
sub = list(message)[0]
text = message[sub]
if not text[1]:
continue
for line in text[1].splitlines():
if line.startswith('https://') and ('#' + intent) in line:
link = line.strip()
break
if link: # already found it, quit
#if delete:
# This should move the email to Trash! But it doesn't work
#self.mail.store(text[0], '+X-GM-LABELS', '\\Trash')
break
self.mail.close()
self.mail.logout()
return link
def get_message_from_email(self, to):
"""Get message from email."""
# sort all emails sent to the right address, newest first
result, data = self.mail.sort('REVERSE DATE', 'UTF-8', '(To "{}")'.format(to))
if len(data[0]) == 0:
return None
# get all potential emails
messages = []
for n in data[0].split():
_, d = self.mail.fetch(n, "(RFC822)")
body = d[0][1]
msg = email.message_from_string(body.decode('utf-8'))
subject = msg['subject']
text = None
for m in msg.walk():
content_type = m.get_content_type()
if content_type == 'text/plain':
text = m.get_payload(decode=True).decode('raw-unicode-escape')
break
if text:
messages.append({subject: [n, text]})
return messages
user = os.getenv('TEST_USER') or sys.argv[1]
password = os.getenv('TEST_PASS') or sys.argv[2]
to = sys.argv[3]
intent = ""
if sys.argv[4] == "confirm":
intent = "confirm"
elif sys.argv[4] == "delete":
intent = "cancel"
ep = EmailProcessor(user, password)
link = ep.get_validation_link_from_email(to, intent, delete=True)
if link:
print(link, end = '')
|
Add Python script used to retrieve confirmation links from emailsimport email
import imaplib
import os
import sys
class EmailProcessor():
mail = None
def __init__(self, user, password, host='mail.mega.co.nz', port=993):
self.mail = imaplib.IMAP4_SSL(host, port)
self.mail.login(user, password)
self.mail.select('Inbox')
def get_validation_link_from_email(self, to, intent, delete=False):
"""Get validation link from email."""
link = None
messages = self.get_message_from_email(to)
if not messages:
return None
for message in messages:
sub = list(message)[0]
text = message[sub]
if not text[1]:
continue
for line in text[1].splitlines():
if line.startswith('https://') and ('#' + intent) in line:
link = line.strip()
break
if link: # already found it, quit
#if delete:
# This should move the email to Trash! But it doesn't work
#self.mail.store(text[0], '+X-GM-LABELS', '\\Trash')
break
self.mail.close()
self.mail.logout()
return link
def get_message_from_email(self, to):
"""Get message from email."""
# sort all emails sent to the right address, newest first
result, data = self.mail.sort('REVERSE DATE', 'UTF-8', '(To "{}")'.format(to))
if len(data[0]) == 0:
return None
# get all potential emails
messages = []
for n in data[0].split():
_, d = self.mail.fetch(n, "(RFC822)")
body = d[0][1]
msg = email.message_from_string(body.decode('utf-8'))
subject = msg['subject']
text = None
for m in msg.walk():
content_type = m.get_content_type()
if content_type == 'text/plain':
text = m.get_payload(decode=True).decode('raw-unicode-escape')
break
if text:
messages.append({subject: [n, text]})
return messages
user = os.getenv('TEST_USER') or sys.argv[1]
password = os.getenv('TEST_PASS') or sys.argv[2]
to = sys.argv[3]
intent = ""
if sys.argv[4] == "confirm":
intent = "confirm"
elif sys.argv[4] == "delete":
intent = "cancel"
ep = EmailProcessor(user, password)
link = ep.get_validation_link_from_email(to, intent, delete=True)
if link:
print(link, end = '')
|
<commit_before><commit_msg>Add Python script used to retrieve confirmation links from emails<commit_after>import email
import imaplib
import os
import sys
class EmailProcessor():
mail = None
def __init__(self, user, password, host='mail.mega.co.nz', port=993):
self.mail = imaplib.IMAP4_SSL(host, port)
self.mail.login(user, password)
self.mail.select('Inbox')
def get_validation_link_from_email(self, to, intent, delete=False):
"""Get validation link from email."""
link = None
messages = self.get_message_from_email(to)
if not messages:
return None
for message in messages:
sub = list(message)[0]
text = message[sub]
if not text[1]:
continue
for line in text[1].splitlines():
if line.startswith('https://') and ('#' + intent) in line:
link = line.strip()
break
if link: # already found it, quit
#if delete:
# This should move the email to Trash! But it doesn't work
#self.mail.store(text[0], '+X-GM-LABELS', '\\Trash')
break
self.mail.close()
self.mail.logout()
return link
def get_message_from_email(self, to):
"""Get message from email."""
# sort all emails sent to the right address, newest first
result, data = self.mail.sort('REVERSE DATE', 'UTF-8', '(To "{}")'.format(to))
if len(data[0]) == 0:
return None
# get all potential emails
messages = []
for n in data[0].split():
_, d = self.mail.fetch(n, "(RFC822)")
body = d[0][1]
msg = email.message_from_string(body.decode('utf-8'))
subject = msg['subject']
text = None
for m in msg.walk():
content_type = m.get_content_type()
if content_type == 'text/plain':
text = m.get_payload(decode=True).decode('raw-unicode-escape')
break
if text:
messages.append({subject: [n, text]})
return messages
user = os.getenv('TEST_USER') or sys.argv[1]
password = os.getenv('TEST_PASS') or sys.argv[2]
to = sys.argv[3]
intent = ""
if sys.argv[4] == "confirm":
intent = "confirm"
elif sys.argv[4] == "delete":
intent = "cancel"
ep = EmailProcessor(user, password)
link = ep.get_validation_link_from_email(to, intent, delete=True)
if link:
print(link, end = '')
|
|
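A minimal usage sketch for the script above, assuming the EmailProcessor class is importable and real IMAP credentials are at hand; the account, recipient address and intent below are placeholders, not values from the original commit:
import os
# Hypothetical credentials, read from the same environment variables the script uses.
user = os.environ.get('TEST_USER', 'qa-bot@example.com')        # assumed account
password = os.environ.get('TEST_PASS', 'not-a-real-password')   # assumed password
ep = EmailProcessor(user, password)                              # defaults to mail.mega.co.nz:993
link = ep.get_validation_link_from_email('new-signup@example.com', 'confirm')
print(link or 'no confirmation link found yet')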
6548c29e87945e22b002bc25323983319cca914c
|
normandy/recipes/migrations/0014_auto_20190228_1128.py
|
normandy/recipes/migrations/0014_auto_20190228_1128.py
|
# Generated by Django 2.0.13 on 2019-02-28 11:28
import json
from urllib.parse import unquote_plus
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model("recipes", "Recipe")
Action = apps.get_model("recipes", "Action")
Signature = apps.get_model("recipes", "Signature")
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for action in Action.objects.exclude(signature=None):
sig = action.signature
action.signature = None
action.save()
sig.delete()
for sig in Signature.objects.all():
sig.delete()
def add_extension_id(apps, schema_editor):
Action = apps.get_model("recipes", "Action")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
Extension = apps.get_model("studies", "Extension")
action = Action.objects.get(name="opt-out-study")
revisions = RecipeRevision.objects.filter(action_id=action.id)
for revision in revisions:
arguments = json.loads(revision.arguments_json)
url = arguments.get("addonUrl")
filename = unquote_plus(url.split("/extensions/").pop())
extension = Extension.objects.get(xpi=f"extensions/{filename}")
arguments["extensionApiId"] = extension.id
revision.arguments_json = json.dumps(arguments)
revision.save()
# Remove signatures to prompt resigning
remove_signatures(apps, schema_editor)
def remove_extension_id(apps, schema_editor):
Action = apps.get_model("recipes", "Action")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
action = Action.objects.get(name="opt-out-study")
revisions = RecipeRevision.objects.filter(action_id=action.id)
for revision in revisions:
arguments = json.loads(revision.arguments_json)
if "extensionApiId" in arguments:
arguments.pop("extensionApiId")
revision.arguments_json = json.dumps(arguments)
revision.save()
# Remove signatures to prompt resigning
remove_signatures(apps, schema_editor)
class Migration(migrations.Migration):
dependencies = [
("recipes", "0013_auto_20181018_2049"),
("studies", "0006_extension_hash_algorithm"),
]
operations = [migrations.RunPython(add_extension_id, remove_extension_id)]
|
Add migration to add extension ID to addon study recipes
|
Add migration to add extension ID to addon study recipes
|
Python
|
mpl-2.0
|
mozilla/normandy,mozilla/normandy,mozilla/normandy,mozilla/normandy
|
Add migration to add extension ID to addon study recipes
|
# Generated by Django 2.0.13 on 2019-02-28 11:28
import json
from urllib.parse import unquote_plus
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model("recipes", "Recipe")
Action = apps.get_model("recipes", "Action")
Signature = apps.get_model("recipes", "Signature")
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for action in Action.objects.exclude(signature=None):
sig = action.signature
action.signature = None
action.save()
sig.delete()
for sig in Signature.objects.all():
sig.delete()
def add_extension_id(apps, schema_editor):
Action = apps.get_model("recipes", "Action")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
Extension = apps.get_model("studies", "Extension")
action = Action.objects.get(name="opt-out-study")
revisions = RecipeRevision.objects.filter(action_id=action.id)
for revision in revisions:
arguments = json.loads(revision.arguments_json)
url = arguments.get("addonUrl")
filename = unquote_plus(url.split("/extensions/").pop())
extension = Extension.objects.get(xpi=f"extensions/{filename}")
arguments["extensionApiId"] = extension.id
revision.arguments_json = json.dumps(arguments)
revision.save()
# Remove signatures to prompt resigning
remove_signatures(apps, schema_editor)
def remove_extension_id(apps, schema_editor):
Action = apps.get_model("recipes", "Action")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
action = Action.objects.get(name="opt-out-study")
revisions = RecipeRevision.objects.filter(action_id=action.id)
for revision in revisions:
arguments = json.loads(revision.arguments_json)
if "extensionApiId" in arguments:
arguments.pop("extensionApiId")
revision.arguments_json = json.dumps(arguments)
revision.save()
# Remove signatures to prompt resigning
remove_signatures(apps, schema_editor)
class Migration(migrations.Migration):
dependencies = [
("recipes", "0013_auto_20181018_2049"),
("studies", "0006_extension_hash_algorithm"),
]
operations = [migrations.RunPython(add_extension_id, remove_extension_id)]
|
<commit_before><commit_msg>Add migration to add extension ID to addon study recipes<commit_after>
|
# Generated by Django 2.0.13 on 2019-02-28 11:28
import json
from urllib.parse import unquote_plus
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model("recipes", "Recipe")
Action = apps.get_model("recipes", "Action")
Signature = apps.get_model("recipes", "Signature")
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for action in Action.objects.exclude(signature=None):
sig = action.signature
action.signature = None
action.save()
sig.delete()
for sig in Signature.objects.all():
sig.delete()
def add_extension_id(apps, schema_editor):
Action = apps.get_model("recipes", "Action")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
Extension = apps.get_model("studies", "Extension")
action = Action.objects.get(name="opt-out-study")
revisions = RecipeRevision.objects.filter(action_id=action.id)
for revision in revisions:
arguments = json.loads(revision.arguments_json)
url = arguments.get("addonUrl")
filename = unquote_plus(url.split("/extensions/").pop())
extension = Extension.objects.get(xpi=f"extensions/{filename}")
arguments["extensionApiId"] = extension.id
revision.arguments_json = json.dumps(arguments)
revision.save()
# Remove signatures to prompt resigning
remove_signatures(apps, schema_editor)
def remove_extension_id(apps, schema_editor):
Action = apps.get_model("recipes", "Action")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
action = Action.objects.get(name="opt-out-study")
revisions = RecipeRevision.objects.filter(action_id=action.id)
for revision in revisions:
arguments = json.loads(revision.arguments_json)
if "extensionApiId" in arguments:
arguments.pop("extensionApiId")
revision.arguments_json = json.dumps(arguments)
revision.save()
# Remove signatures to prompt resigning
remove_signatures(apps, schema_editor)
class Migration(migrations.Migration):
dependencies = [
("recipes", "0013_auto_20181018_2049"),
("studies", "0006_extension_hash_algorithm"),
]
operations = [migrations.RunPython(add_extension_id, remove_extension_id)]
|
Add migration to add extension ID to addon study recipes# Generated by Django 2.0.13 on 2019-02-28 11:28
import json
from urllib.parse import unquote_plus
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model("recipes", "Recipe")
Action = apps.get_model("recipes", "Action")
Signature = apps.get_model("recipes", "Signature")
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for action in Action.objects.exclude(signature=None):
sig = action.signature
action.signature = None
action.save()
sig.delete()
for sig in Signature.objects.all():
sig.delete()
def add_extension_id(apps, schema_editor):
Action = apps.get_model("recipes", "Action")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
Extension = apps.get_model("studies", "Extension")
action = Action.objects.get(name="opt-out-study")
revisions = RecipeRevision.objects.filter(action_id=action.id)
for revision in revisions:
arguments = json.loads(revision.arguments_json)
url = arguments.get("addonUrl")
filename = unquote_plus(url.split("/extensions/").pop())
extension = Extension.objects.get(xpi=f"extensions/{filename}")
arguments["extensionApiId"] = extension.id
revision.arguments_json = json.dumps(arguments)
revision.save()
# Remove signatures to prompt resigning
remove_signatures(apps, schema_editor)
def remove_extension_id(apps, schema_editor):
Action = apps.get_model("recipes", "Action")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
action = Action.objects.get(name="opt-out-study")
revisions = RecipeRevision.objects.filter(action_id=action.id)
for revision in revisions:
arguments = json.loads(revision.arguments_json)
if "extensionApiId" in arguments:
arguments.pop("extensionApiId")
revision.arguments_json = json.dumps(arguments)
revision.save()
# Remove signatures to prompt resigning
remove_signatures(apps, schema_editor)
class Migration(migrations.Migration):
dependencies = [
("recipes", "0013_auto_20181018_2049"),
("studies", "0006_extension_hash_algorithm"),
]
operations = [migrations.RunPython(add_extension_id, remove_extension_id)]
|
<commit_before><commit_msg>Add migration to add extension ID to addon study recipes<commit_after># Generated by Django 2.0.13 on 2019-02-28 11:28
import json
from urllib.parse import unquote_plus
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model("recipes", "Recipe")
Action = apps.get_model("recipes", "Action")
Signature = apps.get_model("recipes", "Signature")
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for action in Action.objects.exclude(signature=None):
sig = action.signature
action.signature = None
action.save()
sig.delete()
for sig in Signature.objects.all():
sig.delete()
def add_extension_id(apps, schema_editor):
Action = apps.get_model("recipes", "Action")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
Extension = apps.get_model("studies", "Extension")
action = Action.objects.get(name="opt-out-study")
revisions = RecipeRevision.objects.filter(action_id=action.id)
for revision in revisions:
arguments = json.loads(revision.arguments_json)
url = arguments.get("addonUrl")
filename = unquote_plus(url.split("/extensions/").pop())
extension = Extension.objects.get(xpi=f"extensions/{filename}")
arguments["extensionApiId"] = extension.id
revision.arguments_json = json.dumps(arguments)
revision.save()
# Remove signatures to prompt resigning
remove_signatures(apps, schema_editor)
def remove_extension_id(apps, schema_editor):
Action = apps.get_model("recipes", "Action")
RecipeRevision = apps.get_model("recipes", "RecipeRevision")
action = Action.objects.get(name="opt-out-study")
revisions = RecipeRevision.objects.filter(action_id=action.id)
for revision in revisions:
arguments = json.loads(revision.arguments_json)
if "extensionApiId" in arguments:
arguments.pop("extensionApiId")
revision.arguments_json = json.dumps(arguments)
revision.save()
# Remove signatures to prompt resigning
remove_signatures(apps, schema_editor)
class Migration(migrations.Migration):
dependencies = [
("recipes", "0013_auto_20181018_2049"),
("studies", "0006_extension_hash_algorithm"),
]
operations = [migrations.RunPython(add_extension_id, remove_extension_id)]
|
|
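A small standalone sketch of the addonUrl-to-filename step the forward migration relies on; the URL is made up, and the Extension.objects.get(...) lookup is left out because it needs a live database:
from urllib.parse import unquote_plus
# Hypothetical addonUrl as stored in an opt-out-study recipe's arguments.
url = "https://normandy.example.com/media/extensions/my%2Bstudy%40mozilla.org-1.0.xpi"
filename = unquote_plus(url.split("/extensions/").pop())
print(filename)                  # my+study@mozilla.org-1.0.xpi
print(f"extensions/{filename}")  # the value matched against Extension.xpi above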
f42052078a08a9707e2030a3352a03f80a6485b2
|
calexicon/internal/gregorian.py
|
calexicon/internal/gregorian.py
|
def is_gregorian_leap_year(y):
if (y % 400) == 0:
return True
if (y % 100) == 0:
return False
if (y % 4) == 0:
return True
return False # pragma: no cover
def days_in_month(year, month):
if month == 2 and is_gregorian_leap_year(year):
return 29
return [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]
|
Create days_in_month fn in new internal module.
|
Create days_in_month fn in new internal module.
|
Python
|
apache-2.0
|
jwg4/qual,jwg4/calexicon
|
Create days_in_month fn in new internal module.
|
def is_gregorian_leap_year(y):
if (y % 400) == 0:
return True
if (y % 100) == 0:
return False
if (y % 4) == 0:
return True
return False # pragma: no cover
def days_in_month(year, month):
if month == 2 and is_gregorian_leap_year(year):
return 29
return [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]
|
<commit_before><commit_msg>Create days_in_month fn in new internal module.<commit_after>
|
def is_gregorian_leap_year(y):
if (y % 400) == 0:
return True
if (y % 100) == 0:
return False
if (y % 4) == 0:
return True
return False # pragma: no cover
def days_in_month(year, month):
if month == 2 and is_gregorian_leap_year(year):
return 29
return [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]
|
Create days_in_month fn in new internal module.def is_gregorian_leap_year(y):
if (y % 400) == 0:
return True
if (y % 100) == 0:
return False
if (y % 4) == 0:
return True
return False # pragma: no cover
def days_in_month(year, month):
if month == 2 and is_gregorian_leap_year(year):
return 29
return [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]
|
<commit_before><commit_msg>Create days_in_month fn in new internal module.<commit_after>def is_gregorian_leap_year(y):
if (y % 400) == 0:
return True
if (y % 100) == 0:
return False
if (y % 4) == 0:
return True
return False # pragma: no cover
def days_in_month(year, month):
if month == 2 and is_gregorian_leap_year(year):
return 29
return [None, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]
|
|
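A few illustrative calls, assuming the two functions above are in scope; the expected values follow directly from the Gregorian leap-year rule (divisible by 4, except century years not divisible by 400):
assert is_gregorian_leap_year(2000) is True    # century divisible by 400
assert is_gregorian_leap_year(1900) is False   # century not divisible by 400
assert days_in_month(2000, 2) == 29
assert days_in_month(1900, 2) == 28
assert days_in_month(2021, 12) == 31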
a831393edb8493ecf6185fb52b58a9053d810ead
|
dissemin/tcp.py
|
dissemin/tcp.py
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
def orcid_base_domain(request):
from django.conf import settings
return {'ORCID_BASE_DOMAIN':settings.ORCID_BASE_DOMAIN}
|
Add missing template context processor
|
Add missing template context processor
|
Python
|
agpl-3.0
|
dissemin/dissemin,wetneb/dissemin,wetneb/dissemin,dissemin/dissemin,dissemin/dissemin,dissemin/dissemin,wetneb/dissemin,dissemin/dissemin,wetneb/dissemin
|
Add missing template context processor
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
def orcid_base_domain(request):
from django.conf import settings
return {'ORCID_BASE_DOMAIN':settings.ORCID_BASE_DOMAIN}
|
<commit_before><commit_msg>Add missing template context processor<commit_after>
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
def orcid_base_domain(request):
from django.conf import settings
return {'ORCID_BASE_DOMAIN':settings.ORCID_BASE_DOMAIN}
|
Add missing template context processor# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
def orcid_base_domain(request):
from django.conf import settings
return {'ORCID_BASE_DOMAIN':settings.ORCID_BASE_DOMAIN}
|
<commit_before><commit_msg>Add missing template context processor<commit_after># -*- encoding: utf-8 -*-
from __future__ import unicode_literals
def orcid_base_domain(request):
from django.conf import settings
return {'ORCID_BASE_DOMAIN':settings.ORCID_BASE_DOMAIN}
|
|
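For the processor to take effect it also has to be registered in the template settings; a hedged sketch of what that entry might look like, assuming the module is importable as dissemin.tcp (the surrounding TEMPLATES values are illustrative only):
# settings.py (sketch)
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            'django.template.context_processors.request',
            'dissemin.tcp.orcid_base_domain',  # exposes ORCID_BASE_DOMAIN to every template
        ],
    },
}]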
ff7c952e991d6bb6b47d02ec5fc9b66584187cc2
|
fake_player_data.py
|
fake_player_data.py
|
"""Generate fake data for testing purposes."""
from faker import Faker
from random import randint
from GoPlayer import Player
from pprint import pprint
fake = Faker()
names = [fake.name() for x in range(49)]
ids = [x for x in range(49)]
ranks = [randint(-30, 7) for x in range(49)]
player_info = list(zip(names, ids, ranks))
player_list = []
for rec in player_info:
player_list.append(Player(name=rec[0], aga_id=rec[1],
aga_rating=rec[2]))
|
Create fake data for testing
|
Create fake data for testing
|
Python
|
mit
|
unyth/tournament_graph
|
Create fake data for testing
|
"""Generate fake data for testing purposes."""
from faker import Faker
from random import randint
from GoPlayer import Player
from pprint import pprint
fake = Faker()
names = [fake.name() for x in range(49)]
ids = [x for x in range(49)]
ranks = [randint(-30, 7) for x in range(49)]
player_info = list(zip(names, ids, ranks))
player_list = []
for rec in player_info:
player_list.append(Player(name=rec[0], aga_id=rec[1],
aga_rating=rec[2]))
|
<commit_before><commit_msg>Create fake data for testing<commit_after>
|
"""Generate fake data for testing purposes."""
from faker import Faker
from random import randint
from GoPlayer import Player
from pprint import pprint
fake = Faker()
names = [fake.name() for x in range(49)]
ids = [x for x in range(49)]
ranks = [randint(-30, 7) for x in range(49)]
player_info = list(zip(names, ids, ranks))
player_list = []
for rec in player_info:
player_list.append(Player(name=rec[0], aga_id=rec[1],
aga_rating=rec[2]))
|
Create fake data for testing"""Generate fake data for testing purposes."""
from faker import Faker
from random import randint
from GoPlayer import Player
from pprint import pprint
fake = Faker()
names = [fake.name() for x in range(49)]
ids = [x for x in range(49)]
ranks = [randint(-30, 7) for x in range(49)]
player_info = list(zip(names, ids, ranks))
player_list = []
for rec in player_info:
player_list.append(Player(name=rec[0], aga_id=rec[1],
aga_rating=rec[2]))
|
<commit_before><commit_msg>Create fake data for testing<commit_after>"""Generate fake data for testing purposes."""
from faker import Faker
from random import randint
from GoPlayer import Player
from pprint import pprint
fake = Faker()
names = [fake.name() for x in range(49)]
ids = [x for x in range(49)]
ranks = [randint(-30, 7) for x in range(49)]
player_info = list(zip(names, ids, ranks))
player_list = []
for rec in player_info:
player_list.append(Player(name=rec[0], aga_id=rec[1],
aga_rating=rec[2]))
|
|
4ae806af174f7d64acfa7c802507863e2650de72
|
workflow/Workflow.py
|
workflow/Workflow.py
|
from git import *
def prefix():
# TODO: Extract to common class if other workflow providers installed
return "issue/"
def __findRepo():
return Repo(".")
def createBranchName(issueName, desc):
branchName = prefix() + str(issueName)
if (desc != None):
branchName += "/" + str(desc)
return branchName
def startIssue(issueName, root, desc=None):
""" Start a new issue branch """
print issueName, root, desc
repo = __findRepo()
branchName = createBranchName(issueName, desc)
print("Checkout: " + branchName)
repo.git.checkout(root, b=branchName)
def deleteIssue(issueName, desc=None, defaultBranch='master'):
""" Delete an issue branch
@param issueName
@param desc
@param defaultBranch -- branch the git repo will be on when the issue branch is deleted. Defaults to master
"""
repo = __findRepo()
branchName = createBranchName(issueName, desc)
print ("Checking out " + str(defaultBranch))
repo.git.checkout(defaultBranch)
print ("Deleting " + str(branchName))
repo.git.branch('-d', branchName)
|
Add a script to implement the workflow pieces.
|
Add a script to implement the workflow pieces.
|
Python
|
apache-2.0
|
bable5/git-workflow
|
Add a script to implement the workflow pieces.
|
from git import *
def prefix():
# TODO: Extract to common class if other workflow providers installed
return "issue/"
def __findRepo():
return Repo(".")
def createBranchName(issueName, desc):
branchName = prefix() + str(issueName)
if (desc != None):
branchName += "/" + str(desc)
return branchName
def startIssue(issueName, root, desc=None):
""" Start a new issue branch """
print issueName, root, desc
repo = __findRepo()
branchName = createBranchName(issueName, desc)
print("Checkout: " + branchName)
repo.git.checkout(root, b=branchName)
def deleteIssue(issueName, desc=None, defaultBranch='master'):
""" Delete an issue branch
@param issueName
@param desc
@param defaultBranch -- branch the git repo will be on when the issue branch is deleted. Defaults to master
"""
repo = __findRepo()
branchName = createBranchName(issueName, desc)
print ("Checking out " + str(defaultBranch))
repo.git.checkout(defaultBranch)
print ("Deleting " + str(branchName))
repo.git.branch('-d', branchName)
|
<commit_before><commit_msg>Add a script to implement the workflow pieces.<commit_after>
|
from git import *
def prefix():
# TODO: Extract to common class if other workflow providers installed
return "issue/"
def __findRepo():
return Repo(".")
def createBranchName(issueName, desc):
branchName = prefix() + str(issueName)
if (desc != None):
branchName += "/" + str(desc)
return branchName
def startIssue(issueName, root, desc=None):
""" Start a new issue branch """
print issueName, root, desc
repo = __findRepo()
branchName = createBranchName(issueName, desc)
print("Checkout: " + branchName)
repo.git.checkout(root, b=branchName)
def deleteIssue(issueName, desc=None, defaultBranch='master'):
""" Delete an issue branch
@param issueName
@param desc
@param defaultBranch -- branch the git repo will be on when the issue branch is deleted. Defaults to master
"""
repo = __findRepo()
branchName = createBranchName(issueName, desc)
print ("Checking out " + str(defaultBranch))
repo.git.checkout(defaultBranch)
print ("Deleting " + str(branchName))
repo.git.branch('-d', branchName)
|
Add a script to implement the workflow pieces.from git import *
def prefix():
# TODO: Extract to common class if other workflow providers installed
return "issue/"
def __findRepo():
return Repo(".")
def createBranchName(issueName, desc):
branchName = prefix() + str(issueName)
if (desc != None):
branchName += "/" + str(desc)
return branchName
def startIssue(issueName, root, desc=None):
""" Start a new issue branch """
print issueName, root, desc
repo = __findRepo()
branchName = createBranchName(issueName, desc)
print("Checkout: " + branchName)
repo.git.checkout(root, b=branchName)
def deleteIssue(issueName, desc=None, defaultBranch='master'):
""" Delete an issue branch
@param issueName
@param desc
@param defaultBranch -- branch the git repo will be on when the issue branch is deleted. Defaults to master
"""
repo = __findRepo()
branchName = createBranchName(issueName, desc)
print ("Checking out " + str(defaultBranch))
repo.git.checkout(defaultBranch)
print ("Deleting " + str(branchName))
repo.git.branch('-d', branchName)
|
<commit_before><commit_msg>Add a script to implement the workflow pieces.<commit_after>from git import *
def prefix():
# TODO: Extract to common class if other workflow providers installed
return "issue/"
def __findRepo():
return Repo(".")
def createBranchName(issueName, desc):
branchName = prefix() + str(issueName)
if (desc != None):
branchName += "/" + str(desc)
return branchName
def startIssue(issueName, root, desc=None):
""" Start a new issue branch """
print issueName, root, desc
repo = __findRepo()
branchName = createBranchName(issueName, desc)
print("Checkout: " + branchName)
repo.git.checkout(root, b=branchName)
def deleteIssue(issueName, desc=None, defaultBranch='master'):
""" Delete an issue branch
@param issueName
@param desc
@param defaultBranch -- branch the git repo will be on when the issue branch is deleted. Defaults to master
"""
repo = __findRepo()
branchName = createBranchName(issueName, desc)
print ("Checking out " + str(defaultBranch))
repo.git.checkout(defaultBranch)
print ("Deleting " + str(branchName))
repo.git.branch('-d', branchName)
|
|
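A usage sketch for the two helpers above, assuming the module is importable as Workflow and the interpreter is started inside a git checkout; the issue number, root branch and description are invented:
import Workflow
Workflow.startIssue(42, 'master', desc='fix-login')   # checks out issue/42/fix-login off master
# ... commit the work and merge it back ...
Workflow.deleteIssue(42, desc='fix-login')            # returns to master and deletes issue/42/fix-login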
b86ab8d1e7eb8f0329fea2146aefe56b9726920a
|
testing/models/test_campaign.py
|
testing/models/test_campaign.py
|
import pytest
from k2catalogue import models
@pytest.fixture
def campaign():
return models.Campaign(id=5)
def test_campaign_repr(campaign):
assert repr(campaign) == '<Campaign: 5>'
|
Add test for campaign repr
|
Add test for campaign repr
|
Python
|
mit
|
mindriot101/k2catalogue
|
Add test for campaign repr
|
import pytest
from k2catalogue import models
@pytest.fixture
def campaign():
return models.Campaign(id=5)
def test_campaign_repr(campaign):
assert repr(campaign) == '<Campaign: 5>'
|
<commit_before><commit_msg>Add test for campaign repr<commit_after>
|
import pytest
from k2catalogue import models
@pytest.fixture
def campaign():
return models.Campaign(id=5)
def test_campaign_repr(campaign):
assert repr(campaign) == '<Campaign: 5>'
|
Add test for campaign reprimport pytest
from k2catalogue import models
@pytest.fixture
def campaign():
return models.Campaign(id=5)
def test_campaign_repr(campaign):
assert repr(campaign) == '<Campaign: 5>'
|
<commit_before><commit_msg>Add test for campaign repr<commit_after>import pytest
from k2catalogue import models
@pytest.fixture
def campaign():
return models.Campaign(id=5)
def test_campaign_repr(campaign):
assert repr(campaign) == '<Campaign: 5>'
|
|
52640eb2053e155c7f33468c0e6c74512b062b0b
|
event-schemas/check_examples.py
|
event-schemas/check_examples.py
|
#! /usr/bin/env python
import sys
import json
import os
def import_error(module, package, debian, error):
sys.stderr.write((
"Error importing %(module)s: %(error)r\n"
"To install %(module)s run:\n"
" pip install %(package)s\n"
"or on Debian run:\n"
" sudo apt-get install python-%(debian)s\n"
) % locals())
if __name__ == '__main__':
sys.exit(1)
try:
import jsonschema
except ImportError as e:
import_error("jsonschema", "jsonschema", "jsonschema", e)
raise
try:
import yaml
except ImportError as e:
import_error("yaml", "PyYAML", "yaml", e)
raise
def check_example_file(examplepath, schemapath):
with open(examplepath) as f:
example = yaml.load(f)
with open(schemapath) as f:
schema = yaml.load(f)
fileurl = "file://" + os.path.abspath(schemapath)
print ("Checking schema for: %r %r" % (examplepath, schemapath))
# Setting the 'id' tells jsonschema where the file is so that it
# can correctly resolve relative $ref references in the schema
schema['id'] = fileurl
try:
jsonschema.validate(example, schema)
    except Exception as e:
raise ValueError("Error validating JSON schema for %r %r" % (
examplepath, schemapath
), e)
def check_example_dir(exampledir, schemadir):
for root, dirs, files in os.walk(exampledir):
for filename in files:
examplepath = os.path.join(root, filename)
schemapath = examplepath.replace(exampledir, schemadir)
check_example_file(examplepath, schemapath)
if __name__ == '__main__':
check_example_dir("examples", "schema")
|
Add a python script for checking that the examples match the event schema.
|
Add a python script for checking that the examples match the event
schema.
Does the same checks as check.sh, but is a *lot* faster making it
suitable for using as a pre-commit hook.
I don't suggest replacing check.sh since it's good to check that the
schema works with multiple implementations of jsonschema.
|
Python
|
apache-2.0
|
matrix-org/matrix-doc,matrix-org/matrix-doc,matrix-org/matrix-doc,matrix-org/matrix-doc
|
Add a python script for checking that the examples match the event
schema.
Does the same checks as check.sh, but is a *lot* faster making it
suitable for using as a pre-commit hook.
I don't suggest replacing check.sh since it's good to check that the
schema works with multiple implementations of jsonschema.
|
#! /usr/bin/env python
import sys
import json
import os
def import_error(module, package, debian, error):
sys.stderr.write((
"Error importing %(module)s: %(error)r\n"
"To install %(module)s run:\n"
" pip install %(package)s\n"
"or on Debian run:\n"
" sudo apt-get install python-%(debian)s\n"
) % locals())
if __name__ == '__main__':
sys.exit(1)
try:
import jsonschema
except ImportError as e:
import_error("jsonschema", "jsonschema", "jsonschema", e)
raise
try:
import yaml
except ImportError as e:
import_error("yaml", "PyYAML", "yaml", e)
raise
def check_example_file(examplepath, schemapath):
with open(examplepath) as f:
example = yaml.load(f)
with open(schemapath) as f:
schema = yaml.load(f)
fileurl = "file://" + os.path.abspath(schemapath)
print ("Checking schema for: %r %r" % (examplepath, schemapath))
# Setting the 'id' tells jsonschema where the file is so that it
# can correctly resolve relative $ref references in the schema
schema['id'] = fileurl
try:
jsonschema.validate(example, schema)
    except Exception as e:
raise ValueError("Error validating JSON schema for %r %r" % (
examplepath, schemapath
), e)
def check_example_dir(exampledir, schemadir):
for root, dirs, files in os.walk(exampledir):
for filename in files:
examplepath = os.path.join(root, filename)
schemapath = examplepath.replace(exampledir, schemadir)
check_example_file(examplepath, schemapath)
if __name__ == '__main__':
check_example_dir("examples", "schema")
|
<commit_before><commit_msg>Add a python script for checking that the examples match the event
schema.
Does the same checks as check.sh, but is a *lot* faster making it
suitable for using as a pre-commit hook.
I don't suggest replacing check.sh since it's good to check that the
schema works with multiple implementations of jsonschema.<commit_after>
|
#! /usr/bin/env python
import sys
import json
import os
def import_error(module, package, debian, error):
sys.stderr.write((
"Error importing %(module)s: %(error)r\n"
"To install %(module)s run:\n"
" pip install %(package)s\n"
"or on Debian run:\n"
" sudo apt-get install python-%(debian)s\n"
) % locals())
if __name__ == '__main__':
sys.exit(1)
try:
import jsonschema
except ImportError as e:
import_error("jsonschema", "jsonschema", "jsonschema", e)
raise
try:
import yaml
except ImportError as e:
import_error("yaml", "PyYAML", "yaml", e)
raise
def check_example_file(examplepath, schemapath):
with open(examplepath) as f:
example = yaml.load(f)
with open(schemapath) as f:
schema = yaml.load(f)
fileurl = "file://" + os.path.abspath(schemapath)
print ("Checking schema for: %r %r" % (examplepath, schemapath))
# Setting the 'id' tells jsonschema where the file is so that it
# can correctly resolve relative $ref references in the schema
schema['id'] = fileurl
try:
jsonschema.validate(example, schema)
    except Exception as e:
raise ValueError("Error validating JSON schema for %r %r" % (
examplepath, schemapath
), e)
def check_example_dir(exampledir, schemadir):
for root, dirs, files in os.walk(exampledir):
for filename in files:
examplepath = os.path.join(root, filename)
schemapath = examplepath.replace(exampledir, schemadir)
check_example_file(examplepath, schemapath)
if __name__ == '__main__':
check_example_dir("examples", "schema")
|
Add a python script for checking that the examples match the event
schema.
Does the same checks as check.sh, but is a *lot* faster making it
suitable for using as a pre-commit hook.
I don't suggest replacing check.sh since it's good to check that the
schema works with multiple implementations of jsonschema.#! /usr/bin/env python
import sys
import json
import os
def import_error(module, package, debian, error):
sys.stderr.write((
"Error importing %(module)s: %(error)r\n"
"To install %(module)s run:\n"
" pip install %(package)s\n"
"or on Debian run:\n"
" sudo apt-get install python-%(debian)s\n"
) % locals())
if __name__ == '__main__':
sys.exit(1)
try:
import jsonschema
except ImportError as e:
import_error("jsonschema", "jsonschema", "jsonschema", e)
raise
try:
import yaml
except ImportError as e:
import_error("yaml", "PyYAML", "yaml", e)
raise
def check_example_file(examplepath, schemapath):
with open(examplepath) as f:
example = yaml.load(f)
with open(schemapath) as f:
schema = yaml.load(f)
fileurl = "file://" + os.path.abspath(schemapath)
print ("Checking schema for: %r %r" % (examplepath, schemapath))
# Setting the 'id' tells jsonschema where the file is so that it
# can correctly resolve relative $ref references in the schema
schema['id'] = fileurl
try:
jsonschema.validate(example, schema)
    except Exception as e:
raise ValueError("Error validating JSON schema for %r %r" % (
examplepath, schemapath
), e)
def check_example_dir(exampledir, schemadir):
for root, dirs, files in os.walk(exampledir):
for filename in files:
examplepath = os.path.join(root, filename)
schemapath = examplepath.replace(exampledir, schemadir)
check_example_file(examplepath, schemapath)
if __name__ == '__main__':
check_example_dir("examples", "schema")
|
<commit_before><commit_msg>Add a python script for checking that the examples match the event
schema.
Does the same checks as check.sh, but is a *lot* faster making it
suitable for using as a pre-commit hook.
I don't suggest replacing check.sh since it's good to check that the
schema works with multiple implementations of jsonschema.<commit_after>#! /usr/bin/env python
import sys
import json
import os
def import_error(module, package, debian, error):
sys.stderr.write((
"Error importing %(module)s: %(error)r\n"
"To install %(module)s run:\n"
" pip install %(package)s\n"
"or on Debian run:\n"
" sudo apt-get install python-%(debian)s\n"
) % locals())
if __name__ == '__main__':
sys.exit(1)
try:
import jsonschema
except ImportError as e:
import_error("jsonschema", "jsonschema", "jsonschema", e)
raise
try:
import yaml
except ImportError as e:
import_error("yaml", "PyYAML", "yaml", e)
raise
def check_example_file(examplepath, schemapath):
with open(examplepath) as f:
example = yaml.load(f)
with open(schemapath) as f:
schema = yaml.load(f)
fileurl = "file://" + os.path.abspath(schemapath)
print ("Checking schema for: %r %r" % (examplepath, schemapath))
# Setting the 'id' tells jsonschema where the file is so that it
# can correctly resolve relative $ref references in the schema
schema['id'] = fileurl
try:
jsonschema.validate(example, schema)
    except Exception as e:
raise ValueError("Error validating JSON schema for %r %r" % (
examplepath, schemapath
), e)
def check_example_dir(exampledir, schemadir):
for root, dirs, files in os.walk(exampledir):
for filename in files:
examplepath = os.path.join(root, filename)
schemapath = examplepath.replace(exampledir, schemadir)
check_example_file(examplepath, schemapath)
if __name__ == '__main__':
check_example_dir("examples", "schema")
|
|
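A sketch of validating a single example/schema pair with the function above, e.g. from a pre-commit hook; both paths are hypothetical:
try:
    check_example_file("examples/m.room.message", "schema/m.room.message")
except ValueError as err:
    print("Schema check failed: %r" % (err,))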
c2c38ec799721b5f8c671e69b59448450da76964
|
shop/cms_menus.py
|
shop/cms_menus.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
class CatalogMenu(CMSAttachMenu):
name = _("Catalog Menu")
def get_nodes(self, request):
try:
if self.instance.publisher_is_draft:
productpage_set = self.instance.publisher_public.productpage_set
else:
productpage_set = self.instance.productpage_set
except AttributeError:
return []
nodes = []
for id, productpage in enumerate(productpage_set.all(), 1):
node = NavigationNode(
title=productpage.product.product_name,
url=productpage.product.get_absolute_url(),
id=id,
)
nodes.append(node)
return nodes
menu_pool.register_menu(CatalogMenu)
|
Add CatalogMenu allowing to use the CMS menu system for products
|
Add CatalogMenu allowing to use the CMS menu system for products
|
Python
|
bsd-3-clause
|
divio/django-shop,nimbis/django-shop,divio/django-shop,nimbis/django-shop,awesto/django-shop,divio/django-shop,nimbis/django-shop,nimbis/django-shop,awesto/django-shop,awesto/django-shop
|
Add CatalogMenu allowing to use the CMS menu system for products
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
class CatalogMenu(CMSAttachMenu):
name = _("Catalog Menu")
def get_nodes(self, request):
try:
if self.instance.publisher_is_draft:
productpage_set = self.instance.publisher_public.productpage_set
else:
productpage_set = self.instance.productpage_set
except AttributeError:
return []
nodes = []
for id, productpage in enumerate(productpage_set.all(), 1):
node = NavigationNode(
title=productpage.product.product_name,
url=productpage.product.get_absolute_url(),
id=id,
)
nodes.append(node)
return nodes
menu_pool.register_menu(CatalogMenu)
|
<commit_before><commit_msg>Add CatalogMenu allowing to use the CMS menu system for products<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
class CatalogMenu(CMSAttachMenu):
name = _("Catalog Menu")
def get_nodes(self, request):
try:
if self.instance.publisher_is_draft:
productpage_set = self.instance.publisher_public.productpage_set
else:
productpage_set = self.instance.productpage_set
except AttributeError:
return []
nodes = []
for id, productpage in enumerate(productpage_set.all(), 1):
node = NavigationNode(
title=productpage.product.product_name,
url=productpage.product.get_absolute_url(),
id=id,
)
nodes.append(node)
return nodes
menu_pool.register_menu(CatalogMenu)
|
Add CatalogMenu allowing to use the CMS menu system for products# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
class CatalogMenu(CMSAttachMenu):
name = _("Catalog Menu")
def get_nodes(self, request):
try:
if self.instance.publisher_is_draft:
productpage_set = self.instance.publisher_public.productpage_set
else:
productpage_set = self.instance.productpage_set
except AttributeError:
return []
nodes = []
for id, productpage in enumerate(productpage_set.all(), 1):
node = NavigationNode(
title=productpage.product.product_name,
url=productpage.product.get_absolute_url(),
id=id,
)
nodes.append(node)
return nodes
menu_pool.register_menu(CatalogMenu)
|
<commit_before><commit_msg>Add CatalogMenu allowing to use the CMS menu system for products<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
class CatalogMenu(CMSAttachMenu):
name = _("Catalog Menu")
def get_nodes(self, request):
try:
if self.instance.publisher_is_draft:
productpage_set = self.instance.publisher_public.productpage_set
else:
productpage_set = self.instance.productpage_set
except AttributeError:
return []
nodes = []
for id, productpage in enumerate(productpage_set.all(), 1):
node = NavigationNode(
title=productpage.product.product_name,
url=productpage.product.get_absolute_url(),
id=id,
)
nodes.append(node)
return nodes
menu_pool.register_menu(CatalogMenu)
|
|
16f6f63d0d4b7362445d7aafdd6b664412ab7076
|
dic_reader_2D.py
|
dic_reader_2D.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 12:50:28 2015
@author: ilyass.tabiai@gmail.com
@author: rolland.delorme@gmail.com
@author: diehl@ins.uni-bonn.de
"""
import csv
import numpy as np
class dic_reader_2D():
def __init__(self,path):
self.data = []
self.read(path)
self.sortdata()
def read(self,path):
with open(path,'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
next(csvreader,None)
for row in csvreader:
self.data.append(np.array(map(float, row)))
def sortX(self,item):
return item[0]
def sortY(self,item):
return item[1]
def sortZ(self,item):
return item[2]
def sortdata(self):
self.data.sort(key = lambda x: (x[13], x[14]))
print len(self.data)
for i in range(0,len(self.data)):
print self.data[i]
|
Add the reader for two-dimensional DIC CSV
|
Add the reader for two-dimensional DIC CSV
|
Python
|
mit
|
lm2-poly/peridynamics_1D,lm2-poly/peridynamics_1D,ilyasst/peridynamics_1D
|
Add the reader for two-dimensional DIC CSV
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 12:50:28 2015
@author: ilyass.tabiai@gmail.com
@author: rolland.delorme@gmail.com
@author: diehl@ins.uni-bonn.de
"""
import csv
import numpy as np
class dic_reader_2D():
def __init__(self,path):
self.data = []
self.read(path)
self.sortdata()
def read(self,path):
with open(path,'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
next(csvreader,None)
for row in csvreader:
self.data.append(np.array(map(float, row)))
def sortX(self,item):
return item[0]
def sortY(self,item):
return item[1]
def sortZ(self,item):
return item[2]
def sortdata(self):
self.data.sort(key = lambda x: (x[13], x[14]))
print len(self.data)
for i in range(0,len(self.data)):
print self.data[i]
|
<commit_before><commit_msg>Add the reader for two-dimensional DIC CSV<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 12:50:28 2015
@author: ilyass.tabiai@gmail.com
@author: rolland.delorme@gmail.com
@author: diehl@ins.uni-bonn.de
"""
import csv
import numpy as np
class dic_reader_2D():
def __init__(self,path):
self.data = []
self.read(path)
self.sortdata()
def read(self,path):
with open(path,'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
next(csvreader,None)
for row in csvreader:
self.data.append(np.array(map(float, row)))
def sortX(self,item):
return item[0]
def sortY(self,item):
return item[1]
def sortZ(self,item):
return item[2]
def sortdata(self):
self.data.sort(key = lambda x: (x[13], x[14]))
print len(self.data)
for i in range(0,len(self.data)):
print self.data[i]
|
Add the reader for two-dimensional DIC CSV# -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 12:50:28 2015
@author: ilyass.tabiai@gmail.com
@author: rolland.delorme@gmail.com
@author: diehl@ins.uni-bonn.de
"""
import csv
import numpy as np
class dic_reader_2D():
def __init__(self,path):
self.data = []
self.read(path)
self.sortdata()
def read(self,path):
with open(path,'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
next(csvreader,None)
for row in csvreader:
self.data.append(np.array(map(float, row)))
def sortX(self,item):
return item[0]
def sortY(self,item):
return item[1]
def sortZ(self,item):
return item[2]
def sortdata(self):
self.data.sort(key = lambda x: (x[13], x[14]))
print len(self.data)
for i in range(0,len(self.data)):
print self.data[i]
|
<commit_before><commit_msg>Add the reader for two-dimensional DIC CSV<commit_after># -*- coding: utf-8 -*-
"""
Created on Sun Dec 13 12:50:28 2015
@author: ilyass.tabiai@gmail.com
@author: rolland.delorme@gmail.com
@author: diehl@ins.uni-bonn.de
"""
import csv
import numpy as np
class dic_reader_2D():
def __init__(self,path):
self.data = []
self.read(path)
self.sortdata()
def read(self,path):
with open(path,'rb') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',')
next(csvreader,None)
for row in csvreader:
self.data.append(np.array(map(float, row)))
def sortX(self,item):
return item[0]
def sortY(self,item):
return item[1]
def sortZ(self,item):
return item[2]
def sortdata(self):
self.data.sort(key = lambda x: (x[13], x[14]))
print len(self.data)
for i in range(0,len(self.data)):
print self.data[i]
|
|
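A minimal usage sketch, assuming a DIC export at the made-up path below with the layout the class expects (one header row, comma-separated floats, grid indices in columns 13 and 14):
reader = dic_reader_2D("data/dic_frame_0001.csv")   # hypothetical CSV path
first_point = reader.data[0]                         # rows come back sorted by columns 13 and 14
print(first_point)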
88b77d2bedda3b08eb55ef163de577e775c6bb26
|
tools/experimental_ssh_eventlet.py
|
tools/experimental_ssh_eventlet.py
|
#!/usr/bin/python
import eventlet
from eventlet.green import socket
import libssh2
import time
import os
import random
def monitor(hostname, username, id):
print '%s %s %d' % (hostname, username, id)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, 22))
session = libssh2.Session()
started = False
while not started:
try:
session.startup(sock)
started = True
except:
eventlet.sleep(1)
session.userauth_publickey_fromfile(
username,
os.path.expanduser('~/.ssh/id_rsa.pub'),
os.path.expanduser('~/.ssh/id_rsa'),
'')
while True:
sl = random.randint(1, 20)
eventlet.sleep(sl)
channel = session.channel()
channel.execute('uname -a')
stdout = []
#stderr = []
while not channel.eof:
data = channel.read(1024)
if data:
stdout.append(data)
#data = channel.read(1024, libssh2.STDERR)
#if data:
# stderr.append(data)
print '%d %d %s' % (id, sl, ''.join(stdout))
#print ''.join(stderr)
pool = eventlet.GreenPool()
i = 1
while True:
pool.spawn_n(monitor, '192.168.122.238', 'root', i)
i = i + 1
if i > 800: break
pool.waitall()
|
Add an experimental ssh monitoring script.
|
Add an experimental ssh monitoring script.
Signed-off-by: Angus Salkeld <86b65304d27d8de73dd7d624c33df7e088f8d94b@redhat.com>
|
Python
|
apache-2.0
|
gonzolino/heat,jasondunsmore/heat,steveb/heat-cfntools,miguelgrinberg/heat,miguelgrinberg/heat,rickerc/heat_audit,dragorosson/heat,jasondunsmore/heat,cwolferh/heat-scratch,NeCTAR-RC/heat,rdo-management/heat,srznew/heat,rh-s/heat,NeCTAR-RC/heat,citrix-openstack-build/heat,dragorosson/heat,maestro-hybrid-cloud/heat,steveb/heat-cfntools,pshchelo/heat,Triv90/Heat,pratikmallya/heat,bbandaru/heat-cfntools,gonzolino/heat,sdake/heat-jeos,maestro-hybrid-cloud/heat,takeshineshiro/heat,openstack/heat,citrix-openstack-build/heat,steveb/heat,pshchelo/heat,rh-s/heat,rdo-management/heat,rickerc/heat_audit,JioCloud/heat,ntt-sic/heat,dims/heat,Triv90/Heat,pratikmallya/heat,noironetworks/heat,srznew/heat,varunarya10/heat,cryptickp/heat,ntt-sic/heat,openstack/heat-cfntools,Triv90/Heat,openstack/heat,steveb/heat,cryptickp/heat,redhat-openstack/heat,takeshineshiro/heat,noironetworks/heat,varunarya10/heat,cwolferh/heat-scratch,JioCloud/heat,redhat-openstack/heat,dims/heat
|
Add an experimental ssh monitoring script.
Signed-off-by: Angus Salkeld <86b65304d27d8de73dd7d624c33df7e088f8d94b@redhat.com>
|
#!/usr/bin/python
import eventlet
from eventlet.green import socket
import libssh2
import time
import os
import random
def monitor(hostname, username, id):
print '%s %s %d' % (hostname, username, id)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, 22))
session = libssh2.Session()
started = False
while not started:
try:
session.startup(sock)
started = True
except:
eventlet.sleep(1)
session.userauth_publickey_fromfile(
username,
os.path.expanduser('~/.ssh/id_rsa.pub'),
os.path.expanduser('~/.ssh/id_rsa'),
'')
while True:
sl = random.randint(1, 20)
eventlet.sleep(sl)
channel = session.channel()
channel.execute('uname -a')
stdout = []
#stderr = []
while not channel.eof:
data = channel.read(1024)
if data:
stdout.append(data)
#data = channel.read(1024, libssh2.STDERR)
#if data:
# stderr.append(data)
print '%d %d %s' % (id, sl, ''.join(stdout))
#print ''.join(stderr)
pool = eventlet.GreenPool()
i = 1
while True:
pool.spawn_n(monitor, '192.168.122.238', 'root', i)
i = i + 1
if i > 800: break
pool.waitall()
|
<commit_before><commit_msg>Add an experimental ssh monitoring script.
Signed-off-by: Angus Salkeld <86b65304d27d8de73dd7d624c33df7e088f8d94b@redhat.com><commit_after>
|
#!/usr/bin/python
import eventlet
from eventlet.green import socket
import libssh2
import time
import os
import random
def monitor(hostname, username, id):
print '%s %s %d' % (hostname, username, id)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, 22))
session = libssh2.Session()
started = False
while not started:
try:
session.startup(sock)
started = True
except:
eventlet.sleep(1)
session.userauth_publickey_fromfile(
username,
os.path.expanduser('~/.ssh/id_rsa.pub'),
os.path.expanduser('~/.ssh/id_rsa'),
'')
while True:
sl = random.randint(1, 20)
eventlet.sleep(sl)
channel = session.channel()
channel.execute('uname -a')
stdout = []
#stderr = []
while not channel.eof:
data = channel.read(1024)
if data:
stdout.append(data)
#data = channel.read(1024, libssh2.STDERR)
#if data:
# stderr.append(data)
print '%d %d %s' % (id, sl, ''.join(stdout))
#print ''.join(stderr)
pool = eventlet.GreenPool()
i = 1
while True:
pool.spawn_n(monitor, '192.168.122.238', 'root', i)
i = i + 1
if i > 800: break
pool.waitall()
|
Add an experimental ssh monitoring script.
Signed-off-by: Angus Salkeld <86b65304d27d8de73dd7d624c33df7e088f8d94b@redhat.com>#!/usr/bin/python
import eventlet
from eventlet.green import socket
import libssh2
import time
import os
import random
def monitor(hostname, username, id):
print '%s %s %d' % (hostname, username, id)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, 22))
session = libssh2.Session()
started = False
while not started:
try:
session.startup(sock)
started = True
except:
eventlet.sleep(1)
session.userauth_publickey_fromfile(
username,
os.path.expanduser('~/.ssh/id_rsa.pub'),
os.path.expanduser('~/.ssh/id_rsa'),
'')
while True:
sl = random.randint(1, 20)
eventlet.sleep(sl)
channel = session.channel()
channel.execute('uname -a')
stdout = []
#stderr = []
while not channel.eof:
data = channel.read(1024)
if data:
stdout.append(data)
#data = channel.read(1024, libssh2.STDERR)
#if data:
# stderr.append(data)
print '%d %d %s' % (id, sl, ''.join(stdout))
#print ''.join(stderr)
pool = eventlet.GreenPool()
i = 1
while True:
pool.spawn_n(monitor, '192.168.122.238', 'root', i)
i = i + 1
if i > 800: break
pool.waitall()
|
<commit_before><commit_msg>Add an experimental ssh monitoring script.
Signed-off-by: Angus Salkeld <86b65304d27d8de73dd7d624c33df7e088f8d94b@redhat.com><commit_after>#!/usr/bin/python
import eventlet
from eventlet.green import socket
import libssh2
import time
import os
import random
def monitor(hostname, username, id):
print '%s %s %d' % (hostname, username, id)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, 22))
session = libssh2.Session()
started = False
while not started:
try:
session.startup(sock)
started = True
except:
eventlet.sleep(1)
session.userauth_publickey_fromfile(
username,
os.path.expanduser('~/.ssh/id_rsa.pub'),
os.path.expanduser('~/.ssh/id_rsa'),
'')
while True:
sl = random.randint(1, 20)
eventlet.sleep(sl)
channel = session.channel()
channel.execute('uname -a')
stdout = []
#stderr = []
while not channel.eof:
data = channel.read(1024)
if data:
stdout.append(data)
#data = channel.read(1024, libssh2.STDERR)
#if data:
# stderr.append(data)
print '%d %d %s' % (id, sl, ''.join(stdout))
#print ''.join(stderr)
pool = eventlet.GreenPool()
i = 1
while True:
pool.spawn_n(monitor, '192.168.122.238', 'root', i)
i = i + 1
if i > 800: break
pool.waitall()
|
|
cc240f959156f4b9facc6a954019efa98d8cff86
|
array/989.py
|
array/989.py
|
class Solution:
def addToArrayForm(self, A: List[int], K: int) -> List[int]:
num = int(''.join(str(x) for x in A)) + K
return ([int(x) for x in str(num)])
|
Add to Array-Form of Integer
|
Add to Array-Form of Integer
|
Python
|
apache-2.0
|
MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode
|
Add to Array-Form of Integer
|
class Solution:
def addToArrayForm(self, A: List[int], K: int) -> List[int]:
num = int(''.join(str(x) for x in A)) + K
return ([int(x) for x in str(num)])
|
<commit_before><commit_msg>Add to Array-Form of Integer<commit_after>
|
class Solution:
def addToArrayForm(self, A: List[int], K: int) -> List[int]:
num = int(''.join(str(x) for x in A)) + K
return ([int(x) for x in str(num)])
|
Add to Array-Form of Integerclass Solution:
def addToArrayForm(self, A: List[int], K: int) -> List[int]:
num = int(''.join(str(x) for x in A)) + K
return ([int(x) for x in str(num)])
|
<commit_before><commit_msg>Add to Array-Form of Integer<commit_after>class Solution:
def addToArrayForm(self, A: List[int], K: int) -> List[int]:
num = int(''.join(str(x) for x in A)) + K
return ([int(x) for x in str(num)])
|
|
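A quick worked example; the Solution class above is assumed to be in scope, and its List[int] annotations presuppose a typing import:
from typing import List  # the annotations in the class above assume this was already imported
print(Solution().addToArrayForm([1, 2, 0, 0], 34))   # 1200 + 34 -> [1, 2, 3, 4]
print(Solution().addToArrayForm([9, 9, 9], 1))       # carry ripples: 999 + 1 -> [1, 0, 0, 0]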
b05cb7b5b8f507f63c9f1c43db43a29a3acf59c9
|
physics/ccg_haar.py
|
physics/ccg_haar.py
|
# encoding: utf-8
"""Routines for sampling from the Haar measures of the classical compact
groups. Algorithms taken from http://arxiv.org/abs/math-ph/0609050.
TODO Symplectic groups are missing
"""
from __future__ import division, print_function
import numpy as np
from scipy.linalg import qr, eigvals
def goe(dim, randn=np.random.randn):
"""Returns a sample from the Gaussian orthogonal ensemble of given
    dimension. (i.e. the haar measure on O(dim)).
:param int dim: Dimension
:param randn: Function to create real N(0,1) distributed random variables.
It should take the shape of the output as numpy.random.randn does
(default: numpy.random.randn)
"""
z = randn(dim, dim)
q, r = qr(z)
d = np.diagonal(r)
ph = d / np.abs(d)
return q * ph
def gue(dim, randn=np.random.randn):
"""Returns a sample from the Gaussian unitary ensemble of given dimension.
(i.e. the haar measure on U(dim)).
:param int dim: Dimension
:param randn: Function to create real N(0,1) distributed random variables.
It should take the shape of the output as numpy.random.randn does
(default: numpy.random.randn)
"""
z = (randn(dim, dim) + 1j * randn(dim, dim)) / np.sqrt(2.0)
q, r = qr(z)
d = np.diagonal(r)
ph = d / np.abs(d)
return q * ph
#############
#   Testing   #
#############
LEVEL_SPACING_DF = {'goe': lambda s: np.pi / 2 * s * np.exp(-np.pi / 4 * s**2),
'gue': lambda s: 32 / np.pi**2 * s**2 * np.exp(-4 / np.pi * s**2)
}
def _test_ensemble(dim, ensemble, samples=1000):
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as pl
from tools.helpers import Progress
eigdists = []
get_sample = globals()[ensemble]
for _ in Progress(xrange(samples)):
u = get_sample(dim)
eigs = np.sort(np.angle(eigvals(u)))
eigdists += list(eigs[1:] - eigs[:-1])
eigdists = np.asarray(eigdists) / (np.sum(eigdists) / len(eigdists))
pl.hist(eigdists, bins=50, normed=True)
    dist = LEVEL_SPACING_DF[ensemble]  # pick the curve matching the requested ensemble
s = np.linspace(0, 4, 100)
pl.plot(s, dist(s))
pl.show()
if __name__ == '__main__':
_test_ensemble(50, 'gue', samples=10000)
_test_ensemble(50, 'goe', samples=10000)
|
Add the random matrix module
|
Add the random matrix module
|
Python
|
unlicense
|
dseuss/pythonlibs
|
Add the random matrix module
|
# encoding: utf-8
"""Routines for sampling from the Haar measures of the classical compact
groups. Algorithms taken from http://arxiv.org/abs/math-ph/0609050.
TODO Symplectic groups are missing
"""
from __future__ import division, print_function
import numpy as np
from scipy.linalg import qr, eigvals
def goe(dim, randn=np.random.randn):
"""Returns a sample from the Gaussian orthogonal ensemble of given
    dimension. (i.e. the haar measure on O(dim)).
:param int dim: Dimension
:param randn: Function to create real N(0,1) distributed random variables.
It should take the shape of the output as numpy.random.randn does
(default: numpy.random.randn)
"""
z = randn(dim, dim)
q, r = qr(z)
d = np.diagonal(r)
ph = d / np.abs(d)
return q * ph
def gue(dim, randn=np.random.randn):
"""Returns a sample from the Gaussian unitary ensemble of given dimension.
(i.e. the haar measure on U(dim)).
:param int dim: Dimension
:param randn: Function to create real N(0,1) distributed random variables.
It should take the shape of the output as numpy.random.randn does
(default: numpy.random.randn)
"""
z = (randn(dim, dim) + 1j * randn(dim, dim)) / np.sqrt(2.0)
q, r = qr(z)
d = np.diagonal(r)
ph = d / np.abs(d)
return q * ph
#############
#   Testing   #
#############
LEVEL_SPACING_DF = {'goe': lambda s: np.pi / 2 * s * np.exp(-np.pi / 4 * s**2),
'gue': lambda s: 32 / np.pi**2 * s**2 * np.exp(-4 / np.pi * s**2)
}
def _test_ensemble(dim, ensemble, samples=1000):
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as pl
from tools.helpers import Progress
eigdists = []
get_sample = globals()[ensemble]
for _ in Progress(xrange(samples)):
u = get_sample(dim)
eigs = np.sort(np.angle(eigvals(u)))
eigdists += list(eigs[1:] - eigs[:-1])
eigdists = np.asarray(eigdists) / (np.sum(eigdists) / len(eigdists))
pl.hist(eigdists, bins=50, normed=True)
    dist = LEVEL_SPACING_DF[ensemble]
s = np.linspace(0, 4, 100)
pl.plot(s, dist(s))
pl.show()
if __name__ == '__main__':
_test_ensemble(50, 'gue', samples=10000)
_test_ensemble(50, 'goe', samples=10000)
|
<commit_before><commit_msg>Add the random matrix module<commit_after>
|
# encoding: utf-8
"""Routines for sampling from the Haar measures of the classical compact
groups. Algorithms taken from http://arxiv.org/abs/math-ph/0609050.
TODO Symplectic groups are missing
"""
from __future__ import division, print_function
import numpy as np
from scipy.linalg import qr, eigvals
def goe(dim, randn=np.random.randn):
"""Returns a sample from the Gaussian orthogonal ensemble of given
    dimension. (i.e. the haar measure on O(dim)).
:param int dim: Dimension
:param randn: Function to create real N(0,1) distributed random variables.
It should take the shape of the output as numpy.random.randn does
(default: numpy.random.randn)
"""
z = randn(dim, dim)
q, r = qr(z)
d = np.diagonal(r)
ph = d / np.abs(d)
return q * ph
def gue(dim, randn=np.random.randn):
"""Returns a sample from the Gaussian unitary ensemble of given dimension.
(i.e. the haar measure on U(dim)).
:param int dim: Dimension
:param randn: Function to create real N(0,1) distributed random variables.
It should take the shape of the output as numpy.random.randn does
(default: numpy.random.randn)
"""
z = (randn(dim, dim) + 1j * randn(dim, dim)) / np.sqrt(2.0)
q, r = qr(z)
d = np.diagonal(r)
ph = d / np.abs(d)
return q * ph
#############
# Testing   #
#############
LEVEL_SPACING_DF = {'goe': lambda s: np.pi / 2 * s * np.exp(-np.pi / 4 * s**2),
'gue': lambda s: 32 / np.pi**2 * s**2 * np.exp(-4 / np.pi * s**2)
}
def _test_ensemble(dim, ensemble, samples=1000):
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as pl
from tools.helpers import Progress
eigdists = []
get_sample = globals()[ensemble]
for _ in Progress(xrange(samples)):
u = get_sample(dim)
eigs = np.sort(np.angle(eigvals(u)))
eigdists += list(eigs[1:] - eigs[:-1])
eigdists = np.asarray(eigdists) / (np.sum(eigdists) / len(eigdists))
pl.hist(eigdists, bins=50, normed=True)
    dist = LEVEL_SPACING_DF[ensemble]
s = np.linspace(0, 4, 100)
pl.plot(s, dist(s))
pl.show()
if __name__ == '__main__':
_test_ensemble(50, 'gue', samples=10000)
_test_ensemble(50, 'goe', samples=10000)
|
Add the random matrix module
# encoding: utf-8
"""Routines for sampling from the Haar measures of the classical compact
groups. Algorithms taken from http://arxiv.org/abs/math-ph/0609050.
TODO Symplectic groups are missing
"""
from __future__ import division, print_function
import numpy as np
from scipy.linalg import qr, eigvals
def goe(dim, randn=np.random.randn):
"""Returns a sample from the Gaussian orthogonal ensemble of given
    dimension. (i.e. the haar measure on O(dim)).
:param int dim: Dimension
:param randn: Function to create real N(0,1) distributed random variables.
It should take the shape of the output as numpy.random.randn does
(default: numpy.random.randn)
"""
z = randn(dim, dim)
q, r = qr(z)
d = np.diagonal(r)
ph = d / np.abs(d)
return q * ph
def gue(dim, randn=np.random.randn):
"""Returns a sample from the Gaussian unitary ensemble of given dimension.
(i.e. the haar measure on U(dim)).
:param int dim: Dimension
:param randn: Function to create real N(0,1) distributed random variables.
It should take the shape of the output as numpy.random.randn does
(default: numpy.random.randn)
"""
z = (randn(dim, dim) + 1j * randn(dim, dim)) / np.sqrt(2.0)
q, r = qr(z)
d = np.diagonal(r)
ph = d / np.abs(d)
return q * ph
#############
# Testing   #
#############
LEVEL_SPACING_DF = {'goe': lambda s: np.pi / 2 * s * np.exp(-np.pi / 4 * s**2),
'gue': lambda s: 32 / np.pi**2 * s**2 * np.exp(-4 / np.pi * s**2)
}
def _test_ensemble(dim, ensemble, samples=1000):
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as pl
from tools.helpers import Progress
eigdists = []
get_sample = globals()[ensemble]
for _ in Progress(xrange(samples)):
u = get_sample(dim)
eigs = np.sort(np.angle(eigvals(u)))
eigdists += list(eigs[1:] - eigs[:-1])
eigdists = np.asarray(eigdists) / (np.sum(eigdists) / len(eigdists))
pl.hist(eigdists, bins=50, normed=True)
    dist = LEVEL_SPACING_DF[ensemble]
s = np.linspace(0, 4, 100)
pl.plot(s, dist(s))
pl.show()
if __name__ == '__main__':
_test_ensemble(50, 'gue', samples=10000)
_test_ensemble(50, 'goe', samples=10000)
|
<commit_before><commit_msg>Add the random matrix module<commit_after>
# encoding: utf-8
"""Routines for sampling from the Haar measures of the classical compact
groups. Algorithms taken from http://arxiv.org/abs/math-ph/0609050.
TODO Symplectic groups are missing
"""
from __future__ import division, print_function
import numpy as np
from scipy.linalg import qr, eigvals
def goe(dim, randn=np.random.randn):
"""Returns a sample from the Gaussian orthogonal ensemble of given
    dimension. (i.e. the haar measure on O(dim)).
:param int dim: Dimension
:param randn: Function to create real N(0,1) distributed random variables.
It should take the shape of the output as numpy.random.randn does
(default: numpy.random.randn)
"""
z = randn(dim, dim)
q, r = qr(z)
d = np.diagonal(r)
ph = d / np.abs(d)
return q * ph
def gue(dim, randn=np.random.randn):
"""Returns a sample from the Gaussian unitary ensemble of given dimension.
(i.e. the haar measure on U(dim)).
:param int dim: Dimension
:param randn: Function to create real N(0,1) distributed random variables.
It should take the shape of the output as numpy.random.randn does
(default: numpy.random.randn)
"""
z = (randn(dim, dim) + 1j * randn(dim, dim)) / np.sqrt(2.0)
q, r = qr(z)
d = np.diagonal(r)
ph = d / np.abs(d)
return q * ph
#############
# Testing   #
#############
LEVEL_SPACING_DF = {'goe': lambda s: np.pi / 2 * s * np.exp(-np.pi / 4 * s**2),
'gue': lambda s: 32 / np.pi**2 * s**2 * np.exp(-4 / np.pi * s**2)
}
def _test_ensemble(dim, ensemble, samples=1000):
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as pl
from tools.helpers import Progress
eigdists = []
get_sample = globals()[ensemble]
for _ in Progress(xrange(samples)):
u = get_sample(dim)
eigs = np.sort(np.angle(eigvals(u)))
eigdists += list(eigs[1:] - eigs[:-1])
eigdists = np.asarray(eigdists) / (np.sum(eigdists) / len(eigdists))
pl.hist(eigdists, bins=50, normed=True)
    dist = LEVEL_SPACING_DF[ensemble]
s = np.linspace(0, 4, 100)
pl.plot(s, dist(s))
pl.show()
if __name__ == '__main__':
_test_ensemble(50, 'gue', samples=10000)
_test_ensemble(50, 'goe', samples=10000)
|
|
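A quick sanity check for the Haar sampler above (a minimal sketch, not part of the original module; it assumes goe and gue are importable from a local module named randmat, which is a hypothetical name for the file shown):
import numpy as np
from randmat import goe, gue  # hypothetical import path for the module above
dim = 4
o = goe(dim)
u = gue(dim)
# A Haar-distributed orthogonal matrix satisfies O^T O = I,
# a Haar-distributed unitary matrix satisfies U^dagger U = I.
assert np.allclose(o.T @ o, np.eye(dim))
assert np.allclose(u.conj().T @ u, np.eye(dim))
# Eigenvalues of a unitary matrix lie on the unit circle.
assert np.allclose(np.abs(np.linalg.eigvals(u)), 1.0)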
993424c053e9a1da9011bd4a8835e95ad881e903
|
alerts/open_port_violation.py
|
alerts/open_port_violation.py
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Jonathan Claudius jclaudius@mozilla.com
# Brandon Myers bmyers@mozilla.com
from lib.alerttask import AlertTask
from query_models import SearchQuery, TermMatch, PhraseMatch, TermsMatch
class AlertOpenPortViolation(AlertTask):
def main(self):
search_query = SearchQuery(hours=4)
search_query.add_must([
TermMatch('_type', 'event'),
PhraseMatch('tag', 'open_port_policy_violation'),
])
self.filtersManual(search_query)
        # Search aggregations on field 'details.destinationipaddress', keep X samples of
# events at most
self.searchEventsAggregated('details.destinationipaddress', samplesLimit=100)
# alert when >= X matching events in an aggregation
self.walkAggregations(threshold=1)
# Set alert properties
def onAggregation(self, aggreg):
# aggreg['count']: number of items in the aggregation, ex: number of failed login attempts
# aggreg['value']: value of the aggregation field, ex: toto@example.com
# aggreg['events']: list of events in the aggregation
category = 'open_port_policy_violation'
tags = ['open_port_policy_violation']
severity = 'CRITICAL'
summary = ('{0} unauthorized open port(s) on {1}'.format(aggreg['count'], aggreg['value']))
# hosts = self.mostCommon(aggreg['allevents'], '_source.details.hostname')
# for i in hosts[:5]:
# summary += ' {0} ({1} hits)'.format(i[0], i[1])
# Create the alert object based on these properties
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
|
Add open port violation alert
|
Add open port violation alert
|
Python
|
mpl-2.0
|
mozilla/MozDef,jeffbryner/MozDef,jeffbryner/MozDef,ameihm0912/MozDef,mpurzynski/MozDef,ameihm0912/MozDef,mpurzynski/MozDef,mpurzynski/MozDef,mpurzynski/MozDef,ameihm0912/MozDef,Phrozyn/MozDef,mozilla/MozDef,Phrozyn/MozDef,jeffbryner/MozDef,gdestuynder/MozDef,ameihm0912/MozDef,gdestuynder/MozDef,mozilla/MozDef,gdestuynder/MozDef,mozilla/MozDef,Phrozyn/MozDef,jeffbryner/MozDef,gdestuynder/MozDef,Phrozyn/MozDef
|
Add open port violation alert
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Jonathan Claudius jclaudius@mozilla.com
# Brandon Myers bmyers@mozilla.com
from lib.alerttask import AlertTask
from query_models import SearchQuery, TermMatch, PhraseMatch, TermsMatch
class AlertOpenPortViolation(AlertTask):
def main(self):
search_query = SearchQuery(hours=4)
search_query.add_must([
TermMatch('_type', 'event'),
PhraseMatch('tag', 'open_port_policy_violation'),
])
self.filtersManual(search_query)
        # Search aggregations on field 'details.destinationipaddress', keep X samples of
# events at most
self.searchEventsAggregated('details.destinationipaddress', samplesLimit=100)
# alert when >= X matching events in an aggregation
self.walkAggregations(threshold=1)
# Set alert properties
def onAggregation(self, aggreg):
# aggreg['count']: number of items in the aggregation, ex: number of failed login attempts
# aggreg['value']: value of the aggregation field, ex: toto@example.com
# aggreg['events']: list of events in the aggregation
category = 'open_port_policy_violation'
tags = ['open_port_policy_violation']
severity = 'CRITICAL'
summary = ('{0} unauthorized open port(s) on {1}'.format(aggreg['count'], aggreg['value']))
# hosts = self.mostCommon(aggreg['allevents'], '_source.details.hostname')
# for i in hosts[:5]:
# summary += ' {0} ({1} hits)'.format(i[0], i[1])
# Create the alert object based on these properties
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
|
<commit_before><commit_msg>Add open port violation alert<commit_after>
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Jonathan Claudius jclaudius@mozilla.com
# Brandon Myers bmyers@mozilla.com
from lib.alerttask import AlertTask
from query_models import SearchQuery, TermMatch, PhraseMatch, TermsMatch
class AlertOpenPortViolation(AlertTask):
def main(self):
search_query = SearchQuery(hours=4)
search_query.add_must([
TermMatch('_type', 'event'),
PhraseMatch('tag', 'open_port_policy_violation'),
])
self.filtersManual(search_query)
        # Search aggregations on field 'details.destinationipaddress', keep X samples of
# events at most
self.searchEventsAggregated('details.destinationipaddress', samplesLimit=100)
# alert when >= X matching events in an aggregation
self.walkAggregations(threshold=1)
# Set alert properties
def onAggregation(self, aggreg):
# aggreg['count']: number of items in the aggregation, ex: number of failed login attempts
# aggreg['value']: value of the aggregation field, ex: toto@example.com
# aggreg['events']: list of events in the aggregation
category = 'open_port_policy_violation'
tags = ['open_port_policy_violation']
severity = 'CRITICAL'
summary = ('{0} unauthorized open port(s) on {1}'.format(aggreg['count'], aggreg['value']))
# hosts = self.mostCommon(aggreg['allevents'], '_source.details.hostname')
# for i in hosts[:5]:
# summary += ' {0} ({1} hits)'.format(i[0], i[1])
# Create the alert object based on these properties
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
|
Add open port violation alert
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Jonathan Claudius jclaudius@mozilla.com
# Brandon Myers bmyers@mozilla.com
from lib.alerttask import AlertTask
from query_models import SearchQuery, TermMatch, PhraseMatch, TermsMatch
class AlertOpenPortViolation(AlertTask):
def main(self):
search_query = SearchQuery(hours=4)
search_query.add_must([
TermMatch('_type', 'event'),
PhraseMatch('tag', 'open_port_policy_violation'),
])
self.filtersManual(search_query)
        # Search aggregations on field 'details.destinationipaddress', keep X samples of
# events at most
self.searchEventsAggregated('details.destinationipaddress', samplesLimit=100)
# alert when >= X matching events in an aggregation
self.walkAggregations(threshold=1)
# Set alert properties
def onAggregation(self, aggreg):
# aggreg['count']: number of items in the aggregation, ex: number of failed login attempts
# aggreg['value']: value of the aggregation field, ex: toto@example.com
# aggreg['events']: list of events in the aggregation
category = 'open_port_policy_violation'
tags = ['open_port_policy_violation']
severity = 'CRITICAL'
summary = ('{0} unauthorized open port(s) on {1}'.format(aggreg['count'], aggreg['value']))
# hosts = self.mostCommon(aggreg['allevents'], '_source.details.hostname')
# for i in hosts[:5]:
# summary += ' {0} ({1} hits)'.format(i[0], i[1])
# Create the alert object based on these properties
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
|
<commit_before><commit_msg>Add open port violation alert<commit_after>#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# Jonathan Claudius jclaudius@mozilla.com
# Brandon Myers bmyers@mozilla.com
from lib.alerttask import AlertTask
from query_models import SearchQuery, TermMatch, PhraseMatch, TermsMatch
class AlertOpenPortViolation(AlertTask):
def main(self):
search_query = SearchQuery(hours=4)
search_query.add_must([
TermMatch('_type', 'event'),
PhraseMatch('tag', 'open_port_policy_violation'),
])
self.filtersManual(search_query)
        # Search aggregations on field 'details.destinationipaddress', keep X samples of
# events at most
self.searchEventsAggregated('details.destinationipaddress', samplesLimit=100)
# alert when >= X matching events in an aggregation
self.walkAggregations(threshold=1)
# Set alert properties
def onAggregation(self, aggreg):
# aggreg['count']: number of items in the aggregation, ex: number of failed login attempts
# aggreg['value']: value of the aggregation field, ex: toto@example.com
# aggreg['events']: list of events in the aggregation
category = 'open_port_policy_violation'
tags = ['open_port_policy_violation']
severity = 'CRITICAL'
summary = ('{0} unauthorized open port(s) on {1}'.format(aggreg['count'], aggreg['value']))
# hosts = self.mostCommon(aggreg['allevents'], '_source.details.hostname')
# for i in hosts[:5]:
# summary += ' {0} ({1} hits)'.format(i[0], i[1])
# Create the alert object based on these properties
return self.createAlertDict(summary, category, tags, aggreg['events'], severity)
|
|
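For readers unfamiliar with the aggregation dict used by onAggregation above, a small hedged illustration of the summary string it builds (the dict shape here is assumed from the inline comments, not taken from the MozDef source):
# Example aggregation shaped after the comments in onAggregation:
# 'count' is the number of matching events, 'value' the aggregated field value.
aggreg = {'count': 3, 'value': '10.0.0.5', 'events': []}
summary = '{0} unauthorized open port(s) on {1}'.format(aggreg['count'], aggreg['value'])
print(summary)  # -> 3 unauthorized open port(s) on 10.0.0.5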
82464394910d46a24e0ba3cd015530ee85d68bab
|
examples/save_jpeg_using_pil.py
|
examples/save_jpeg_using_pil.py
|
# Usage: python examples/save_jpeg_using_pil.py src.CR2 dest.jpg
# Requires PIL (pip install pillow)
import sys
from PIL import Image
from rawkit.raw import Raw
src = sys.argv[1]
dest = sys.argv[2]
with Raw(filename=src) as raw:
rgb_buffer = raw.to_buffer()
# Convert the buffer from [r, g, b, r...] to [(r, g, b), (r, g, b)...]
rgb_tuples = [
tuple(rgb_buffer[i:i+3]) for i in range(0, len(rgb_buffer), 3)
]
image = Image.new('RGB', [raw.metadata.width, raw.metadata.height])
image.putdata(rgb_tuples)
image.save(dest)
|
Add example saving jpg using pillow
|
Add example saving jpg using pillow
|
Python
|
mit
|
photoshell/rawkit
|
Add example saving jpg using pillow
|
# Usage: python examples/save_jpeg_using_pil.py src.CR2 dest.jpg
# Requires PIL (pip install pillow)
import sys
from PIL import Image
from rawkit.raw import Raw
src = sys.argv[1]
dest = sys.argv[2]
with Raw(filename=src) as raw:
rgb_buffer = raw.to_buffer()
# Convert the buffer from [r, g, b, r...] to [(r, g, b), (r, g, b)...]
rgb_tuples = [
tuple(rgb_buffer[i:i+3]) for i in range(0, len(rgb_buffer), 3)
]
image = Image.new('RGB', [raw.metadata.width, raw.metadata.height])
image.putdata(rgb_tuples)
image.save(dest)
|
<commit_before><commit_msg>Add example saving jpg using pillow<commit_after>
|
# Usage: python examples/save_jpeg_using_pil.py src.CR2 dest.jpg
# Requires PIL (pip install pillow)
import sys
from PIL import Image
from rawkit.raw import Raw
src = sys.argv[1]
dest = sys.argv[2]
with Raw(filename=src) as raw:
rgb_buffer = raw.to_buffer()
# Convert the buffer from [r, g, b, r...] to [(r, g, b), (r, g, b)...]
rgb_tuples = [
tuple(rgb_buffer[i:i+3]) for i in range(0, len(rgb_buffer), 3)
]
image = Image.new('RGB', [raw.metadata.width, raw.metadata.height])
image.putdata(rgb_tuples)
image.save(dest)
|
Add example saving jpg using pillow
# Usage: python examples/save_jpeg_using_pil.py src.CR2 dest.jpg
# Requires PIL (pip install pillow)
import sys
from PIL import Image
from rawkit.raw import Raw
src = sys.argv[1]
dest = sys.argv[2]
with Raw(filename=src) as raw:
rgb_buffer = raw.to_buffer()
# Convert the buffer from [r, g, b, r...] to [(r, g, b), (r, g, b)...]
rgb_tuples = [
tuple(rgb_buffer[i:i+3]) for i in range(0, len(rgb_buffer), 3)
]
image = Image.new('RGB', [raw.metadata.width, raw.metadata.height])
image.putdata(rgb_tuples)
image.save(dest)
|
<commit_before><commit_msg>Add example saving jpg using pillow<commit_after># Usage: python examples/save_jpeg_using_pil.py src.CR2 dest.jpg
# Requires PIL (pip install pillow)
import sys
from PIL import Image
from rawkit.raw import Raw
src = sys.argv[1]
dest = sys.argv[2]
with Raw(filename=src) as raw:
rgb_buffer = raw.to_buffer()
# Convert the buffer from [r, g, b, r...] to [(r, g, b), (r, g, b)...]
rgb_tuples = [
tuple(rgb_buffer[i:i+3]) for i in range(0, len(rgb_buffer), 3)
]
image = Image.new('RGB', [raw.metadata.width, raw.metadata.height])
image.putdata(rgb_tuples)
image.save(dest)
|
|
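The buffer-to-tuple conversion is the core of the example above; a standalone sketch of the same slicing on a made-up four-pixel buffer (no RAW file or rawkit install needed):
# Flat RGB buffer [r, g, b, r, g, b, ...] for four pixels.
rgb_buffer = [255, 0, 0, 0, 255, 0, 0, 0, 255, 255, 255, 255]
# Group every three values into one (r, g, b) tuple, as PIL's putdata expects.
rgb_tuples = [tuple(rgb_buffer[i:i + 3]) for i in range(0, len(rgb_buffer), 3)]
print(rgb_tuples)  # [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 255)]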
8bd7e0a8f1b3bd69faf454dd8a779fd5b4d4acbd
|
heat/tests/test_cli.py
|
heat/tests/test_cli.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
import unittest
import heat
import os
import subprocess
basepath = os.path.join(heat.__path__[0], os.path.pardir)
@attr(tag=['unit', 'cli'])
@attr(speed='medium')
class CliTest(unittest.TestCase):
def test_bins(self):
bins = ['heat-cfn', 'heat-boto', 'heat-watch']
for bin in bins:
fullpath = basepath + '/bin/' + bin
proc = subprocess.Popen(fullpath,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode:
print 'Error executing %s:\n %s %s ' % (bin, stdout, stderr)
raise subprocess.CalledProcessError(proc.returncode, bin)
|
Add test to simply run a few binaries
|
Add test to simply run a few binaries
This is not really a unit test, but it's so fast that I think it could
be considered as such. And it would be nice to gate on such blatant
breakage.
Change-Id: I8ff4ca27a912c30bd962168418ac44217ea9e54d
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com>
|
Python
|
apache-2.0
|
varunarya10/heat,dragorosson/heat,rh-s/heat,jasondunsmore/heat,Triv90/Heat,ntt-sic/heat,citrix-openstack-build/heat,pratikmallya/heat,redhat-openstack/heat,pshchelo/heat,Triv90/Heat,openstack/heat,maestro-hybrid-cloud/heat,dims/heat,rickerc/heat_audit,rh-s/heat,miguelgrinberg/heat,cwolferh/heat-scratch,srznew/heat,Triv90/Heat,NeCTAR-RC/heat,takeshineshiro/heat,JioCloud/heat,noironetworks/heat,maestro-hybrid-cloud/heat,openstack/heat,cryptickp/heat,gonzolino/heat,noironetworks/heat,dims/heat,cryptickp/heat,steveb/heat,pratikmallya/heat,takeshineshiro/heat,miguelgrinberg/heat,citrix-openstack-build/heat,redhat-openstack/heat,steveb/heat,NeCTAR-RC/heat,ntt-sic/heat,srznew/heat,rdo-management/heat,cwolferh/heat-scratch,jasondunsmore/heat,JioCloud/heat,dragorosson/heat,rdo-management/heat,varunarya10/heat,gonzolino/heat,rickerc/heat_audit,pshchelo/heat
|
Add test to simply run a few binaries
This is not really a unit test, but it's so fast that I think it could
be considered as such. And it would be nice to gate on such blatant
breakage.
Change-Id: I8ff4ca27a912c30bd962168418ac44217ea9e54d
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
import unittest
import heat
import os
import subprocess
basepath = os.path.join(heat.__path__[0], os.path.pardir)
@attr(tag=['unit', 'cli'])
@attr(speed='medium')
class CliTest(unittest.TestCase):
def test_bins(self):
bins = ['heat-cfn', 'heat-boto', 'heat-watch']
for bin in bins:
fullpath = basepath + '/bin/' + bin
proc = subprocess.Popen(fullpath,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode:
print 'Error executing %s:\n %s %s ' % (bin, stdout, stderr)
raise subprocess.CalledProcessError(proc.returncode, bin)
|
<commit_before><commit_msg>Add test to simply run a few binaries
This is not really a unit test, but it's so fast that I think it could
be considered as such. And it would be nice to gate on such blatant
breakage.
Change-Id: I8ff4ca27a912c30bd962168418ac44217ea9e54d
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com><commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
import unittest
import heat
import os
import subprocess
basepath = os.path.join(heat.__path__[0], os.path.pardir)
@attr(tag=['unit', 'cli'])
@attr(speed='medium')
class CliTest(unittest.TestCase):
def test_bins(self):
bins = ['heat-cfn', 'heat-boto', 'heat-watch']
for bin in bins:
fullpath = basepath + '/bin/' + bin
proc = subprocess.Popen(fullpath,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode:
print 'Error executing %s:\n %s %s ' % (bin, stdout, stderr)
raise subprocess.CalledProcessError(proc.returncode, bin)
|
Add test to simply run a few binaries
This is not really a unit test, but it's so fast that I think it could
be considered as such. And it would be nice to gate on such blatant
breakage.
Change-Id: I8ff4ca27a912c30bd962168418ac44217ea9e54d
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com>
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
import unittest
import heat
import os
import subprocess
basepath = os.path.join(heat.__path__[0], os.path.pardir)
@attr(tag=['unit', 'cli'])
@attr(speed='medium')
class CliTest(unittest.TestCase):
def test_bins(self):
bins = ['heat-cfn', 'heat-boto', 'heat-watch']
for bin in bins:
fullpath = basepath + '/bin/' + bin
proc = subprocess.Popen(fullpath,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode:
print 'Error executing %s:\n %s %s ' % (bin, stdout, stderr)
raise subprocess.CalledProcessError(proc.returncode, bin)
|
<commit_before><commit_msg>Add test to simply run a few binaries
This is not really a unit test, but it's so fast that I think it could
be considered as such. And it would be nice to gate on such blatant
breakage.
Change-Id: I8ff4ca27a912c30bd962168418ac44217ea9e54d
Signed-off-by: Jeff Peeler <d776211e63e47e40d00501ffdb86a800e0782fea@redhat.com><commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
import unittest
import heat
import os
import subprocess
basepath = os.path.join(heat.__path__[0], os.path.pardir)
@attr(tag=['unit', 'cli'])
@attr(speed='medium')
class CliTest(unittest.TestCase):
def test_bins(self):
bins = ['heat-cfn', 'heat-boto', 'heat-watch']
for bin in bins:
fullpath = basepath + '/bin/' + bin
proc = subprocess.Popen(fullpath,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode:
print 'Error executing %s:\n %s %s ' % (bin, stdout, stderr)
raise subprocess.CalledProcessError(proc.returncode, bin)
|
|
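A rough Python 3 equivalent of the check performed by test_bins above, shown only as a sketch (subprocess.run replaces the manual Popen/communicate pair; the example binary path is an assumption, not part of the Heat tree):
import subprocess
def binary_runs_cleanly(fullpath):
    """Return True if the executable at fullpath exits with status 0."""
    proc = subprocess.run([fullpath], capture_output=True, text=True)
    if proc.returncode:
        print('Error executing %s:\n %s %s ' % (fullpath, proc.stdout, proc.stderr))
    return proc.returncode == 0
# Hypothetical usage:
# assert binary_runs_cleanly('/usr/local/bin/heat-cfn')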
2a343aa13fb000ffb56173e77207fe14c3bbe85c
|
mezzanine/accounts/models.py
|
mezzanine/accounts/models.py
|
from django.db import DatabaseError, connection
from django.db.models.signals import post_save
from mezzanine.accounts import get_profile_user_fieldname
from mezzanine.conf import settings
from mezzanine.utils.models import lazy_model_ops
__all__ = ()
if getattr(settings, "AUTH_PROFILE_MODULE", None):
# This will be called when class_prepared signal has been sent
# for both the profile and user model.
def wait_for_models(profile_model, user_model):
user_field = get_profile_user_fieldname(profile_model, user_model)
def create_profile(sender, instance, created, **kwargs):
if created:
try:
kwargs = {str(user_field): instance}
profile_model.objects.create(**kwargs)
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
post_save.connect(create_profile, sender=user_model, weak=False)
lazy_model_ops.add(wait_for_models,
settings.AUTH_PROFILE_MODULE, settings.AUTH_USER_MODEL)
|
from django.db import DatabaseError, connection
from django.db.models.signals import post_save
from mezzanine.accounts import get_profile_user_fieldname, get_profile_for_user
from mezzanine.conf import settings
from mezzanine.utils.models import lazy_model_ops
__all__ = ()
if getattr(settings, "AUTH_PROFILE_MODULE", None):
# This will be called when class_prepared signal has been sent
# for both the profile and user model.
def wait_for_models(profile_model, user_model):
user_field = get_profile_user_fieldname(profile_model, user_model)
def create_profile(sender, instance, created, **_):
if created:
try:
get_profile_for_user(instance)
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
post_save.connect(create_profile, sender=user_model, weak=False)
lazy_model_ops.add(wait_for_models,
settings.AUTH_PROFILE_MODULE, settings.AUTH_USER_MODEL)
|
Use get_profile_for_user() in profile signal handler
|
Use get_profile_for_user() in profile signal handler
|
Python
|
bsd-2-clause
|
joshcartme/mezzanine,vladir/mezzanine,industrydive/mezzanine,SoLoHiC/mezzanine,sjdines/mezzanine,dustinrb/mezzanine,spookylukey/mezzanine,damnfine/mezzanine,sjdines/mezzanine,frankchin/mezzanine,dsanders11/mezzanine,biomassives/mezzanine,frankchin/mezzanine,agepoly/mezzanine,wyzex/mezzanine,dsanders11/mezzanine,emile2016/mezzanine,douglaskastle/mezzanine,dekomote/mezzanine-modeltranslation-backport,jjz/mezzanine,tuxinhang1989/mezzanine,sjuxax/mezzanine,nikolas/mezzanine,Cajoline/mezzanine,jerivas/mezzanine,molokov/mezzanine,saintbird/mezzanine,viaregio/mezzanine,dovydas/mezzanine,Skytorn86/mezzanine,jerivas/mezzanine,molokov/mezzanine,Kniyl/mezzanine,wbtuomela/mezzanine,Cicero-Zhao/mezzanine,joshcartme/mezzanine,readevalprint/mezzanine,mush42/mezzanine,jerivas/mezzanine,gradel/mezzanine,promil23/mezzanine,viaregio/mezzanine,frankier/mezzanine,christianwgd/mezzanine,Cicero-Zhao/mezzanine,ryneeverett/mezzanine,douglaskastle/mezzanine,PegasusWang/mezzanine,ZeroXn/mezzanine,joshcartme/mezzanine,Cajoline/mezzanine,wbtuomela/mezzanine,tuxinhang1989/mezzanine,frankchin/mezzanine,mush42/mezzanine,jjz/mezzanine,ryneeverett/mezzanine,dsanders11/mezzanine,dovydas/mezzanine,Cajoline/mezzanine,fusionbox/mezzanine,wbtuomela/mezzanine,adrian-the-git/mezzanine,fusionbox/mezzanine,wyzex/mezzanine,jjz/mezzanine,ZeroXn/mezzanine,webounty/mezzanine,webounty/mezzanine,gradel/mezzanine,industrydive/mezzanine,eino-makitalo/mezzanine,agepoly/mezzanine,SoLoHiC/mezzanine,theclanks/mezzanine,sjdines/mezzanine,christianwgd/mezzanine,promil23/mezzanine,eino-makitalo/mezzanine,theclanks/mezzanine,ryneeverett/mezzanine,spookylukey/mezzanine,PegasusWang/mezzanine,webounty/mezzanine,biomassives/mezzanine,geodesign/mezzanine,damnfine/mezzanine,saintbird/mezzanine,agepoly/mezzanine,emile2016/mezzanine,dustinrb/mezzanine,nikolas/mezzanine,emile2016/mezzanine,geodesign/mezzanine,ZeroXn/mezzanine,eino-makitalo/mezzanine,mush42/mezzanine,Skytorn86/mezzanine,tuxinhang1989/mezzanine,Kniyl/mezzanine,dustinrb/mezzanine,geodesign/mezzanine,stephenmcd/mezzanine,saintbird/mezzanine,spookylukey/mezzanine,damnfine/mezzanine,gradel/mezzanine,SoLoHiC/mezzanine,PegasusWang/mezzanine,vladir/mezzanine,dovydas/mezzanine,adrian-the-git/mezzanine,AlexHill/mezzanine,Skytorn86/mezzanine,douglaskastle/mezzanine,dekomote/mezzanine-modeltranslation-backport,biomassives/mezzanine,promil23/mezzanine,industrydive/mezzanine,Kniyl/mezzanine,sjuxax/mezzanine,viaregio/mezzanine,dekomote/mezzanine-modeltranslation-backport,christianwgd/mezzanine,readevalprint/mezzanine,sjuxax/mezzanine,nikolas/mezzanine,wyzex/mezzanine,stephenmcd/mezzanine,theclanks/mezzanine,frankier/mezzanine,molokov/mezzanine,frankier/mezzanine,stephenmcd/mezzanine,adrian-the-git/mezzanine,vladir/mezzanine,AlexHill/mezzanine,readevalprint/mezzanine
|
from django.db import DatabaseError, connection
from django.db.models.signals import post_save
from mezzanine.accounts import get_profile_user_fieldname
from mezzanine.conf import settings
from mezzanine.utils.models import lazy_model_ops
__all__ = ()
if getattr(settings, "AUTH_PROFILE_MODULE", None):
# This will be called when class_prepared signal has been sent
# for both the profile and user model.
def wait_for_models(profile_model, user_model):
user_field = get_profile_user_fieldname(profile_model, user_model)
def create_profile(sender, instance, created, **kwargs):
if created:
try:
kwargs = {str(user_field): instance}
profile_model.objects.create(**kwargs)
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
post_save.connect(create_profile, sender=user_model, weak=False)
lazy_model_ops.add(wait_for_models,
settings.AUTH_PROFILE_MODULE, settings.AUTH_USER_MODEL)
Use get_profile_for_user() in profile signal handler
|
from django.db import DatabaseError, connection
from django.db.models.signals import post_save
from mezzanine.accounts import get_profile_user_fieldname, get_profile_for_user
from mezzanine.conf import settings
from mezzanine.utils.models import lazy_model_ops
__all__ = ()
if getattr(settings, "AUTH_PROFILE_MODULE", None):
# This will be called when class_prepared signal has been sent
# for both the profile and user model.
def wait_for_models(profile_model, user_model):
user_field = get_profile_user_fieldname(profile_model, user_model)
def create_profile(sender, instance, created, **_):
if created:
try:
get_profile_for_user(instance)
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
post_save.connect(create_profile, sender=user_model, weak=False)
lazy_model_ops.add(wait_for_models,
settings.AUTH_PROFILE_MODULE, settings.AUTH_USER_MODEL)
|
<commit_before>from django.db import DatabaseError, connection
from django.db.models.signals import post_save
from mezzanine.accounts import get_profile_user_fieldname
from mezzanine.conf import settings
from mezzanine.utils.models import lazy_model_ops
__all__ = ()
if getattr(settings, "AUTH_PROFILE_MODULE", None):
# This will be called when class_prepared signal has been sent
# for both the profile and user model.
def wait_for_models(profile_model, user_model):
user_field = get_profile_user_fieldname(profile_model, user_model)
def create_profile(sender, instance, created, **kwargs):
if created:
try:
kwargs = {str(user_field): instance}
profile_model.objects.create(**kwargs)
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
post_save.connect(create_profile, sender=user_model, weak=False)
lazy_model_ops.add(wait_for_models,
settings.AUTH_PROFILE_MODULE, settings.AUTH_USER_MODEL)
<commit_msg>Use get_profile_for_user() in profile signal handler<commit_after>
|
from django.db import DatabaseError, connection
from django.db.models.signals import post_save
from mezzanine.accounts import get_profile_user_fieldname, get_profile_for_user
from mezzanine.conf import settings
from mezzanine.utils.models import lazy_model_ops
__all__ = ()
if getattr(settings, "AUTH_PROFILE_MODULE", None):
# This will be called when class_prepared signal has been sent
# for both the profile and user model.
def wait_for_models(profile_model, user_model):
user_field = get_profile_user_fieldname(profile_model, user_model)
def create_profile(sender, instance, created, **_):
if created:
try:
get_profile_for_user(instance)
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
post_save.connect(create_profile, sender=user_model, weak=False)
lazy_model_ops.add(wait_for_models,
settings.AUTH_PROFILE_MODULE, settings.AUTH_USER_MODEL)
|
from django.db import DatabaseError, connection
from django.db.models.signals import post_save
from mezzanine.accounts import get_profile_user_fieldname
from mezzanine.conf import settings
from mezzanine.utils.models import lazy_model_ops
__all__ = ()
if getattr(settings, "AUTH_PROFILE_MODULE", None):
# This will be called when class_prepared signal has been sent
# for both the profile and user model.
def wait_for_models(profile_model, user_model):
user_field = get_profile_user_fieldname(profile_model, user_model)
def create_profile(sender, instance, created, **kwargs):
if created:
try:
kwargs = {str(user_field): instance}
profile_model.objects.create(**kwargs)
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
post_save.connect(create_profile, sender=user_model, weak=False)
lazy_model_ops.add(wait_for_models,
settings.AUTH_PROFILE_MODULE, settings.AUTH_USER_MODEL)
Use get_profile_for_user() in profile signal handler
from django.db import DatabaseError, connection
from django.db.models.signals import post_save
from mezzanine.accounts import get_profile_user_fieldname, get_profile_for_user
from mezzanine.conf import settings
from mezzanine.utils.models import lazy_model_ops
__all__ = ()
if getattr(settings, "AUTH_PROFILE_MODULE", None):
# This will be called when class_prepared signal has been sent
# for both the profile and user model.
def wait_for_models(profile_model, user_model):
user_field = get_profile_user_fieldname(profile_model, user_model)
def create_profile(sender, instance, created, **_):
if created:
try:
get_profile_for_user(instance)
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
post_save.connect(create_profile, sender=user_model, weak=False)
lazy_model_ops.add(wait_for_models,
settings.AUTH_PROFILE_MODULE, settings.AUTH_USER_MODEL)
|
<commit_before>from django.db import DatabaseError, connection
from django.db.models.signals import post_save
from mezzanine.accounts import get_profile_user_fieldname
from mezzanine.conf import settings
from mezzanine.utils.models import lazy_model_ops
__all__ = ()
if getattr(settings, "AUTH_PROFILE_MODULE", None):
# This will be called when class_prepared signal has been sent
# for both the profile and user model.
def wait_for_models(profile_model, user_model):
user_field = get_profile_user_fieldname(profile_model, user_model)
def create_profile(sender, instance, created, **kwargs):
if created:
try:
kwargs = {str(user_field): instance}
profile_model.objects.create(**kwargs)
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
post_save.connect(create_profile, sender=user_model, weak=False)
lazy_model_ops.add(wait_for_models,
settings.AUTH_PROFILE_MODULE, settings.AUTH_USER_MODEL)
<commit_msg>Use get_profile_for_user() in profile signal handler<commit_after>from django.db import DatabaseError, connection
from django.db.models.signals import post_save
from mezzanine.accounts import get_profile_user_fieldname, get_profile_for_user
from mezzanine.conf import settings
from mezzanine.utils.models import lazy_model_ops
__all__ = ()
if getattr(settings, "AUTH_PROFILE_MODULE", None):
# This will be called when class_prepared signal has been sent
# for both the profile and user model.
def wait_for_models(profile_model, user_model):
user_field = get_profile_user_fieldname(profile_model, user_model)
def create_profile(sender, instance, created, **_):
if created:
try:
get_profile_for_user(instance)
except DatabaseError:
# User creation in initial syncdb may have been triggered,
# while profile model is under migration management and
# doesn't exist yet. We close the connection so that it
# gets re-opened, allowing syncdb to continue and complete.
connection.close()
post_save.connect(create_profile, sender=user_model, weak=False)
lazy_model_ops.add(wait_for_models,
settings.AUTH_PROFILE_MODULE, settings.AUTH_USER_MODEL)
|
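The commit above swaps the manual profile_model.objects.create(...) call for get_profile_for_user(); the sketch below shows what a get-or-create style helper of that kind could look like. This is an assumption for illustration only; see mezzanine.accounts for the real implementation.
# Hypothetical sketch of a get-or-create profile helper (illustrative only,
# not Mezzanine's actual get_profile_for_user).
def get_profile_for_user_sketch(user, profile_model, user_field):
    """Return the user's profile, creating it on first access."""
    lookup = {str(user_field): user}
    profile, _created = profile_model.objects.get_or_create(**lookup)
    return profile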
16d6dd8f3f2359a3a5d83eac9d2812560ab2d6bf
|
microcosm_pubsub/handlers.py
|
microcosm_pubsub/handlers.py
|
"""
Handler base classes.
"""
from abc import ABCMeta
from inflection import humanize
from requests import get
class URIHandler(object):
"""
Base handler for URI-driven events.
As a general rule, we want PubSub events to convey the URI of a resource that was created
(because resources are ideally immutable state). In this case, we want asynchronous workers
to query the existing URI to get more information (and to handle race conditions where the
message was delivered before the resource was committed.)
There are five expected outcomes for this handler:
- Raising an error (e.g. a bug)
- Skipping the handlers (because the pubsub message carried enough information to bypass processing)
- Handling the message after fetching the resource by URI
- Ignore the message after fetching the resource by URI
- Raising a nack (e.g. because the resource was not committed yet)
The middle three cases are all handled here with the expectation that we produce *one* INFO-level
log per message processed (unless an error/nack is raised).
"""
__metaclass__ = ABCMeta
@property
def name(self):
return humanize(self.__class__.__name__)
def __call__(self, message):
uri = message["uri"]
self.on_call(message, uri)
skip_reason = self.get_reason_to_skip(message, uri)
if skip_reason is not None:
self.on_skip(message, uri, skip_reason)
return False
resource = self.get_resource(uri=uri)
if self.handle(message, uri, resource):
self.on_handle(message, uri, resource)
return True
else:
self.on_ignore(message, uri, resource)
return False
def on_call(self, message, uri):
self.logger.debug(
"Starting {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def on_skip(self, message, uri, reason):
self.logger.info(
"Skipping {handler} because {reason}",
extra=dict(
handler=self.name,
reason=reason,
uri=uri,
),
)
def on_handle(self, message, uri, resource):
self.logger.info(
"Handled {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def on_ignore(self, message, uri, resource):
self.logger.info(
"Ignored {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def get_reason_to_skip(self, message, uri):
"""
Some messages carry enough context that we can avoid resolving the URI entirely.
"""
return None
def get_resource(self, uri):
"""
Mock-friendly URI getter.
"""
response = get(uri)
response.raise_for_status()
return response.json()
    def handle(self, message, uri, resource):
return True
|
Add a base handler for URI-oriented pubsub messages
|
Add a base handler for URI-oriented pubsub messages
|
Python
|
apache-2.0
|
globality-corp/microcosm-pubsub,globality-corp/microcosm-pubsub
|
Add a base handler for URI-oriented pubsub messages
|
"""
Handler base classes.
"""
from abc import ABCMeta
from inflection import humanize
from requests import get
class URIHandler(object):
"""
Base handler for URI-driven events.
As a general rule, we want PubSub events to convey the URI of a resource that was created
(because resources are ideally immutable state). In this case, we want asynchronous workers
to query the existing URI to get more information (and to handle race conditions where the
message was delivered before the resource was committed.)
There are five expected outcomes for this handler:
- Raising an error (e.g. a bug)
- Skipping the handlers (because the pubsub message carried enough information to bypass processing)
- Handling the message after fetching the resource by URI
- Ignore the message after fetching the resource by URI
- Raising a nack (e.g. because the resource was not committed yet)
The middle three cases are all handled here with the expectation that we produce *one* INFO-level
log per message processed (unless an error/nack is raised).
"""
__metaclass__ = ABCMeta
@property
def name(self):
return humanize(self.__class__.__name__)
def __call__(self, message):
uri = message["uri"]
self.on_call(message, uri)
skip_reason = self.get_reason_to_skip(message, uri)
if skip_reason is not None:
self.on_skip(message, uri, skip_reason)
return False
resource = self.get_resource(uri=uri)
if self.handle(message, uri, resource):
self.on_handle(message, uri, resource)
return True
else:
self.on_ignore(message, uri, resource)
return False
def on_call(self, message, uri):
self.logger.debug(
"Starting {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def on_skip(self, message, uri, reason):
self.logger.info(
"Skipping {handler} because {reason}",
extra=dict(
handler=self.name,
reason=reason,
uri=uri,
),
)
def on_handle(self, message, uri, resource):
self.logger.info(
"Handled {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def on_ignore(self, message, uri, resource):
self.logger.info(
"Ignored {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def get_reason_to_skip(self, message, uri):
"""
Some messages carry enough context that we can avoid resolving the URI entirely.
"""
return None
def get_resource(self, uri):
"""
Mock-friendly URI getter.
"""
response = get(uri)
response.raise_for_status()
return response.json()
    def handle(self, message, uri, resource):
return True
|
<commit_before><commit_msg>Add a base handler for URI-oriented pubsub messages<commit_after>
|
"""
Handler base classes.
"""
from abc import ABCMeta
from inflection import humanize
from requests import get
class URIHandler(object):
"""
Base handler for URI-driven events.
As a general rule, we want PubSub events to convey the URI of a resource that was created
(because resources are ideally immutable state). In this case, we want asynchronous workers
to query the existing URI to get more information (and to handle race conditions where the
message was delivered before the resource was committed.)
There are five expected outcomes for this handler:
- Raising an error (e.g. a bug)
- Skipping the handlers (because the pubsub message carried enough information to bypass processing)
- Handling the message after fetching the resource by URI
- Ignore the message after fetching the resource by URI
- Raising a nack (e.g. because the resource was not committed yet)
The middle three cases are all handled here with the expectation that we produce *one* INFO-level
log per message processed (unless an error/nack is raised).
"""
__metaclass__ = ABCMeta
@property
def name(self):
return humanize(self.__class__.__name__)
def __call__(self, message):
uri = message["uri"]
self.on_call(message, uri)
skip_reason = self.get_reason_to_skip(message, uri)
if skip_reason is not None:
self.on_skip(message, uri, skip_reason)
return False
resource = self.get_resource(uri=uri)
if self.handle(message, uri, resource):
self.on_handle(message, uri, resource)
return True
else:
self.on_ignore(message, uri, resource)
return False
def on_call(self, message, uri):
self.logger.debug(
"Starting {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def on_skip(self, message, uri, reason):
self.logger.info(
"Skipping {handler} because {reason}",
extra=dict(
handler=self.name,
reason=reason,
uri=uri,
),
)
def on_handle(self, message, uri, resource):
self.logger.info(
"Handled {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def on_ignore(self, message, uri, resource):
self.logger.info(
"Ignored {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def get_reason_to_skip(self, message, uri):
"""
Some messages carry enough context that we can avoid resolving the URI entirely.
"""
return None
def get_resource(self, uri):
"""
Mock-friendly URI getter.
"""
response = get(uri)
response.raise_for_status()
return response.json()
    def handle(self, message, uri, resource):
return True
|
Add a base handler for URI-oriented pubsub messages
"""
Handler base classes.
"""
from abc import ABCMeta
from inflection import humanize
from requests import get
class URIHandler(object):
"""
Base handler for URI-driven events.
As a general rule, we want PubSub events to convey the URI of a resource that was created
(because resources are ideally immutable state). In this case, we want asynchronous workers
to query the existing URI to get more information (and to handle race conditions where the
message was delivered before the resource was committed.)
There are five expected outcomes for this handler:
- Raising an error (e.g. a bug)
- Skipping the handlers (because the pubsub message carried enough information to bypass processing)
- Handling the message after fetching the resource by URI
- Ignore the message after fetching the resource by URI
- Raising a nack (e.g. because the resource was not committed yet)
The middle three cases are all handled here with the expectation that we produce *one* INFO-level
log per message processed (unless an error/nack is raised).
"""
__metaclass__ = ABCMeta
@property
def name(self):
return humanize(self.__class__.__name__)
def __call__(self, message):
uri = message["uri"]
self.on_call(message, uri)
skip_reason = self.get_reason_to_skip(message, uri)
if skip_reason is not None:
self.on_skip(message, uri, skip_reason)
return False
resource = self.get_resource(uri=uri)
if self.handle(message, uri, resource):
self.on_handle(message, uri, resource)
return True
else:
self.on_ignore(message, uri, resource)
return False
def on_call(self, message, uri):
self.logger.debug(
"Starting {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def on_skip(self, message, uri, reason):
self.logger.info(
"Skipping {handler} because {reason}",
extra=dict(
handler=self.name,
reason=reason,
uri=uri,
),
)
def on_handle(self, message, uri, resource):
self.logger.info(
"Handled {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def on_ignore(self, message, uri, resource):
self.logger.info(
"Ignored {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def get_reason_to_skip(self, message, uri):
"""
Some messages carry enough context that we can avoid resolving the URI entirely.
"""
return None
def get_resource(self, uri):
"""
Mock-friendly URI getter.
"""
response = get(uri)
response.raise_for_status()
return response.json()
    def handle(self, message, uri, resource):
return True
|
<commit_before><commit_msg>Add a base handler for URI-oriented pubsub messages<commit_after>"""
Handler base classes.
"""
from abc import ABCMeta
from inflection import humanize
from requests import get
class URIHandler(object):
"""
Base handler for URI-driven events.
As a general rule, we want PubSub events to convey the URI of a resource that was created
(because resources are ideally immutable state). In this case, we want asynchronous workers
to query the existing URI to get more information (and to handle race conditions where the
message was delivered before the resource was committed.)
There are five expected outcomes for this handler:
- Raising an error (e.g. a bug)
- Skipping the handlers (because the pubsub message carried enough information to bypass processing)
- Handling the message after fetching the resource by URI
- Ignore the message after fetching the resource by URI
- Raising a nack (e.g. because the resource was not committed yet)
The middle three cases are all handled here with the expectation that we produce *one* INFO-level
log per message processed (unless an error/nack is raised).
"""
__metaclass__ = ABCMeta
@property
def name(self):
return humanize(self.__class__.__name__)
def __call__(self, message):
uri = message["uri"]
self.on_call(message, uri)
skip_reason = self.get_reason_to_skip(message, uri)
if skip_reason is not None:
self.on_skip(message, uri, skip_reason)
return False
resource = self.get_resource(uri=uri)
if self.handle(message, uri, resource):
self.on_handle(message, uri, resource)
return True
else:
self.on_ignore(message, uri, resource)
return False
def on_call(self, message, uri):
self.logger.debug(
"Starting {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def on_skip(self, message, uri, reason):
self.logger.info(
"Skipping {handler} because {reason}",
extra=dict(
handler=self.name,
reason=reason,
uri=uri,
),
)
def on_handle(self, message, uri, resource):
self.logger.info(
"Handled {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def on_ignore(self, message, uri, resource):
self.logger.info(
"Ignored {handler}",
extra=dict(
handler=self.name,
uri=uri,
),
)
def get_reason_to_skip(self, message, uri):
"""
Some messages carry enough context that we can avoid resolving the URI entirely.
"""
return None
def get_resource(self, uri):
"""
Mock-friendly URI getter.
"""
response = get(uri)
response.raise_for_status()
return response.json()
    def handle(self, message, uri, resource):
return True
|
|
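A minimal hedged sketch of a concrete subclass following the contract described in the URIHandler docstring above (the message flag, resource field, and logger wiring are invented for illustration and are not part of microcosm-pubsub; it assumes URIHandler from the module above is importable):
import logging
class ExampleCreatedHandler(URIHandler):
    """Handle 'example created' messages by acting on the fetched resource."""
    logger = logging.getLogger(__name__)
    def get_reason_to_skip(self, message, uri):
        # Skip without fetching when the message alone carries enough information.
        if message.get("ignore", False):
            return "message was flagged as ignore"
        return None
    def handle(self, message, uri, resource):
        # Return True to record the message as handled, False to ignore it.
        return resource.get("state") == "created"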
df4601af8ce70e48ffd4362556c2b07e4a6f53db
|
nettests/experimental/script.py
|
nettests/experimental/script.py
|
from ooni import nettest
from ooni.utils import log
from twisted.internet import defer, protocol, reactor
from twisted.python import usage
import os
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
class UsageOptions(usage.Options):
optParameters = [
['interpreter', 'i', '', 'The interpreter to use'],
['script', 's', '', 'The script to run']
]
class ScriptProcessProtocol(protocol.ProcessProtocol):
def __init__(self, test_case):
self.test_case = test_case
self.deferred = defer.Deferred()
def connectionMade(self):
log.debug("connectionMade")
self.transport.closeStdin()
self.test_case.report['lua_output'] = ""
def outReceived(self, data):
log.debug('outReceived: %s' % data)
self.test_case.report['lua_output'] += data
def errReceived(self, data):
log.err('Script error: %s' % data)
self.transport.signalProcess('KILL')
def processEnded(self, status):
rc = status.value.exitCode
log.debug('processEnded: %s, %s' % \
(rc, self.test_case.report['lua_output']))
if rc == 0:
self.deferred.callback(self)
else:
self.deferred.errback(rc)
# TODO: Maybe the script requires a back-end.
class Script(nettest.NetTestCase):
name = "Script test"
version = "0.1"
authors = "Dominic Hamon"
usageOptions = UsageOptions
requiredOptions = ['interpreter', 'script']
def test_run_script(self):
"""
We run the script specified in the usage options and take whatever
is printed to stdout as the results of the test.
"""
processProtocol = ScriptProcessProtocol(self)
interpreter = self.localOptions['interpreter']
if not which(interpreter):
log.err('Unable to find %s executable in PATH.' % interpreter)
return
reactor.spawnProcess(processProtocol,
interpreter,
args=[interpreter, self.localOptions['script']],
env={'HOME': os.environ['HOME']},
usePTY=True)
if not reactor.running:
reactor.run()
return processProtocol.deferred
|
Add Dominic Hamon's nettest for running tests written with other interpreters.
|
Add Dominic Hamon's nettest for running tests written with other interpreters.
* Fixes #8011.
|
Python
|
bsd-2-clause
|
kdmurray91/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe,lordappsec/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,0xPoly/ooni-probe,juga0/ooni-probe,juga0/ooni-probe,kdmurray91/ooni-probe,kdmurray91/ooni-probe,Karthikeyan-kkk/ooni-probe,juga0/ooni-probe,0xPoly/ooni-probe,lordappsec/ooni-probe
|
Add Dominic Hamon's nettest for running tests written with other interpreters.
* Fixes #8011.
|
from ooni import nettest
from ooni.utils import log
from twisted.internet import defer, protocol, reactor
from twisted.python import usage
import os
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
class UsageOptions(usage.Options):
optParameters = [
['interpreter', 'i', '', 'The interpreter to use'],
['script', 's', '', 'The script to run']
]
class ScriptProcessProtocol(protocol.ProcessProtocol):
def __init__(self, test_case):
self.test_case = test_case
self.deferred = defer.Deferred()
def connectionMade(self):
log.debug("connectionMade")
self.transport.closeStdin()
self.test_case.report['lua_output'] = ""
def outReceived(self, data):
log.debug('outReceived: %s' % data)
self.test_case.report['lua_output'] += data
def errReceived(self, data):
log.err('Script error: %s' % data)
self.transport.signalProcess('KILL')
def processEnded(self, status):
rc = status.value.exitCode
log.debug('processEnded: %s, %s' % \
(rc, self.test_case.report['lua_output']))
if rc == 0:
self.deferred.callback(self)
else:
self.deferred.errback(rc)
# TODO: Maybe the script requires a back-end.
class Script(nettest.NetTestCase):
name = "Script test"
version = "0.1"
authors = "Dominic Hamon"
usageOptions = UsageOptions
requiredOptions = ['interpreter', 'script']
def test_run_script(self):
"""
We run the script specified in the usage options and take whatever
is printed to stdout as the results of the test.
"""
processProtocol = ScriptProcessProtocol(self)
interpreter = self.localOptions['interpreter']
if not which(interpreter):
log.err('Unable to find %s executable in PATH.' % interpreter)
return
reactor.spawnProcess(processProtocol,
interpreter,
args=[interpreter, self.localOptions['script']],
env={'HOME': os.environ['HOME']},
usePTY=True)
if not reactor.running:
reactor.run()
return processProtocol.deferred
|
<commit_before><commit_msg>Add Dominic Hamon's nettest for running tests written with other interpreters.
* Fixes #8011.<commit_after>
|
from ooni import nettest
from ooni.utils import log
from twisted.internet import defer, protocol, reactor
from twisted.python import usage
import os
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
class UsageOptions(usage.Options):
optParameters = [
['interpreter', 'i', '', 'The interpreter to use'],
['script', 's', '', 'The script to run']
]
class ScriptProcessProtocol(protocol.ProcessProtocol):
def __init__(self, test_case):
self.test_case = test_case
self.deferred = defer.Deferred()
def connectionMade(self):
log.debug("connectionMade")
self.transport.closeStdin()
self.test_case.report['lua_output'] = ""
def outReceived(self, data):
log.debug('outReceived: %s' % data)
self.test_case.report['lua_output'] += data
def errReceived(self, data):
log.err('Script error: %s' % data)
self.transport.signalProcess('KILL')
def processEnded(self, status):
rc = status.value.exitCode
log.debug('processEnded: %s, %s' % \
(rc, self.test_case.report['lua_output']))
if rc == 0:
self.deferred.callback(self)
else:
self.deferred.errback(rc)
# TODO: Maybe the script requires a back-end.
class Script(nettest.NetTestCase):
name = "Script test"
version = "0.1"
authors = "Dominic Hamon"
usageOptions = UsageOptions
requiredOptions = ['interpreter', 'script']
def test_run_script(self):
"""
We run the script specified in the usage options and take whatever
is printed to stdout as the results of the test.
"""
processProtocol = ScriptProcessProtocol(self)
interpreter = self.localOptions['interpreter']
if not which(interpreter):
log.err('Unable to find %s executable in PATH.' % interpreter)
return
reactor.spawnProcess(processProtocol,
interpreter,
args=[interpreter, self.localOptions['script']],
env={'HOME': os.environ['HOME']},
usePTY=True)
if not reactor.running:
reactor.run()
return processProtocol.deferred
|
Add Dominic Hamon's nettest for running tests written with other interpreters.
* Fixes #8011.from ooni import nettest
from ooni.utils import log
from twisted.internet import defer, protocol, reactor
from twisted.python import usage
import os
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
class UsageOptions(usage.Options):
optParameters = [
['interpreter', 'i', '', 'The interpreter to use'],
['script', 's', '', 'The script to run']
]
class ScriptProcessProtocol(protocol.ProcessProtocol):
def __init__(self, test_case):
self.test_case = test_case
self.deferred = defer.Deferred()
def connectionMade(self):
log.debug("connectionMade")
self.transport.closeStdin()
self.test_case.report['lua_output'] = ""
def outReceived(self, data):
log.debug('outReceived: %s' % data)
self.test_case.report['lua_output'] += data
def errReceived(self, data):
log.err('Script error: %s' % data)
self.transport.signalProcess('KILL')
def processEnded(self, status):
rc = status.value.exitCode
log.debug('processEnded: %s, %s' % \
(rc, self.test_case.report['lua_output']))
if rc == 0:
self.deferred.callback(self)
else:
self.deferred.errback(rc)
# TODO: Maybe the script requires a back-end.
class Script(nettest.NetTestCase):
name = "Script test"
version = "0.1"
authors = "Dominic Hamon"
usageOptions = UsageOptions
requiredOptions = ['interpreter', 'script']
def test_run_script(self):
"""
We run the script specified in the usage options and take whatever
is printed to stdout as the results of the test.
"""
processProtocol = ScriptProcessProtocol(self)
interpreter = self.localOptions['interpreter']
if not which(interpreter):
log.err('Unable to find %s executable in PATH.' % interpreter)
return
reactor.spawnProcess(processProtocol,
interpreter,
args=[interpreter, self.localOptions['script']],
env={'HOME': os.environ['HOME']},
usePTY=True)
if not reactor.running:
reactor.run()
return processProtocol.deferred
|
<commit_before><commit_msg>Add Dominic Hamon's nettest for running tests written with other interpreters.
* Fixes #8011.<commit_after>from ooni import nettest
from ooni.utils import log
from twisted.internet import defer, protocol, reactor
from twisted.python import usage
import os
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
class UsageOptions(usage.Options):
optParameters = [
['interpreter', 'i', '', 'The interpreter to use'],
['script', 's', '', 'The script to run']
]
class ScriptProcessProtocol(protocol.ProcessProtocol):
def __init__(self, test_case):
self.test_case = test_case
self.deferred = defer.Deferred()
def connectionMade(self):
log.debug("connectionMade")
self.transport.closeStdin()
self.test_case.report['lua_output'] = ""
def outReceived(self, data):
log.debug('outReceived: %s' % data)
self.test_case.report['lua_output'] += data
def errReceived(self, data):
log.err('Script error: %s' % data)
self.transport.signalProcess('KILL')
def processEnded(self, status):
rc = status.value.exitCode
log.debug('processEnded: %s, %s' % \
(rc, self.test_case.report['lua_output']))
if rc == 0:
self.deferred.callback(self)
else:
self.deferred.errback(rc)
# TODO: Maybe the script requires a back-end.
class Script(nettest.NetTestCase):
name = "Script test"
version = "0.1"
authors = "Dominic Hamon"
usageOptions = UsageOptions
requiredOptions = ['interpreter', 'script']
def test_run_script(self):
"""
We run the script specified in the usage options and take whatever
is printed to stdout as the results of the test.
"""
processProtocol = ScriptProcessProtocol(self)
interpreter = self.localOptions['interpreter']
if not which(interpreter):
log.err('Unable to find %s executable in PATH.' % interpreter)
return
reactor.spawnProcess(processProtocol,
interpreter,
args=[interpreter, self.localOptions['script']],
env={'HOME': os.environ['HOME']},
usePTY=True)
if not reactor.running:
reactor.run()
return processProtocol.deferred
|
|
3c4a91171c3588a918415801f39442a5733dcfd4
|
tests/test_rst.py
|
tests/test_rst.py
|
# -*- coding: utf-8 -*-
import sys
import os
import pytest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../src/epicslide"))
import rst
class TestRst(object):
def test_pygments(self):
p = rst.Pygments('')
print p.__dict__
print p.run()
assert False
def html_parts(self):
assert False
def html_body(self):
assert False
|
Structure for the Rst test
|
Structure for the Rst test
|
Python
|
apache-2.0
|
netantho/epicslide,netantho/epicslide
|
Structure for the Rst test
|
# -*- coding: utf-8 -*-
import sys
import os
import pytest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../src/epicslide"))
import rst
class TestRst(object):
def test_pygments(self):
p = rst.Pygments('')
print p.__dict__
print p.run()
assert False
def html_parts(self):
assert False
def html_body(self):
assert False
|
<commit_before><commit_msg>Structure for the Rst test<commit_after>
|
# -*- coding: utf-8 -*-
import sys
import os
import pytest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../src/epicslide"))
import rst
class TestRst(object):
def test_pygments(self):
p = rst.Pygments('')
print p.__dict__
print p.run()
assert False
def html_parts(self):
assert False
def html_body(self):
assert False
|
Structure for the Rst test# -*- coding: utf-8 -*-
import sys
import os
import pytest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../src/epicslide"))
import rst
class TestRst(object):
def test_pygments(self):
p = rst.Pygments('')
print p.__dict__
print p.run()
assert False
def html_parts(self):
assert False
def html_body(self):
assert False
|
<commit_before><commit_msg>Structure for the Rst test<commit_after># -*- coding: utf-8 -*-
import sys
import os
import pytest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../src/epicslide"))
import rst
class TestRst(object):
def test_pygments(self):
p = rst.Pygments('')
print p.__dict__
print p.run()
assert False
def html_parts(self):
assert False
def html_body(self):
assert False
|
|
88021ff39f63a6d4616028a8a7e5226e1e706e93
|
cmsplugin_zinnia/cms_toolbar.py
|
cmsplugin_zinnia/cms_toolbar.py
|
"""Toolbar extensions for CMS"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
class ZinniaToolbar(CMSToolbar):
def populate(self):
zinnia_menu = self.toolbar.get_or_create_menu(
'zinnia-menu', _('Zinnia'))
url = reverse('admin:zinnia_entry_add')
zinnia_menu.add_sideframe_item(_('New entry'), url=url)
url = reverse('admin:zinnia_category_add')
zinnia_menu.add_sideframe_item(_('New category'), url=url)
zinnia_menu.add_break()
url = reverse('admin:zinnia_entry_changelist')
zinnia_menu.add_sideframe_item(_('Entries list'), url=url)
url = reverse('admin:zinnia_category_changelist')
zinnia_menu.add_sideframe_item(_('Categories list'), url=url)
url = reverse('admin:tagging_tag_changelist')
zinnia_menu.add_sideframe_item(_('Tags list'), url=url)
toolbar_pool.register(ZinniaToolbar)
|
Add a toolbar for Zinnia
|
Add a toolbar for Zinnia
|
Python
|
bsd-3-clause
|
bittner/cmsplugin-zinnia,bittner/cmsplugin-zinnia,django-blog-zinnia/cmsplugin-zinnia,bittner/cmsplugin-zinnia,django-blog-zinnia/cmsplugin-zinnia,django-blog-zinnia/cmsplugin-zinnia
|
Add a toolbar for Zinnia
|
"""Toolbar extensions for CMS"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
class ZinniaToolbar(CMSToolbar):
def populate(self):
zinnia_menu = self.toolbar.get_or_create_menu(
'zinnia-menu', _('Zinnia'))
url = reverse('admin:zinnia_entry_add')
zinnia_menu.add_sideframe_item(_('New entry'), url=url)
url = reverse('admin:zinnia_category_add')
zinnia_menu.add_sideframe_item(_('New category'), url=url)
zinnia_menu.add_break()
url = reverse('admin:zinnia_entry_changelist')
zinnia_menu.add_sideframe_item(_('Entries list'), url=url)
url = reverse('admin:zinnia_category_changelist')
zinnia_menu.add_sideframe_item(_('Categories list'), url=url)
url = reverse('admin:tagging_tag_changelist')
zinnia_menu.add_sideframe_item(_('Tags list'), url=url)
toolbar_pool.register(ZinniaToolbar)
|
<commit_before><commit_msg>Add a toolbar for Zinnia<commit_after>
|
"""Toolbar extensions for CMS"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
class ZinniaToolbar(CMSToolbar):
def populate(self):
zinnia_menu = self.toolbar.get_or_create_menu(
'zinnia-menu', _('Zinnia'))
url = reverse('admin:zinnia_entry_add')
zinnia_menu.add_sideframe_item(_('New entry'), url=url)
url = reverse('admin:zinnia_category_add')
zinnia_menu.add_sideframe_item(_('New category'), url=url)
zinnia_menu.add_break()
url = reverse('admin:zinnia_entry_changelist')
zinnia_menu.add_sideframe_item(_('Entries list'), url=url)
url = reverse('admin:zinnia_category_changelist')
zinnia_menu.add_sideframe_item(_('Categories list'), url=url)
url = reverse('admin:tagging_tag_changelist')
zinnia_menu.add_sideframe_item(_('Tags list'), url=url)
toolbar_pool.register(ZinniaToolbar)
|
Add a toolbar for Zinnia"""Toolbar extensions for CMS"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
class ZinniaToolbar(CMSToolbar):
def populate(self):
zinnia_menu = self.toolbar.get_or_create_menu(
'zinnia-menu', _('Zinnia'))
url = reverse('admin:zinnia_entry_add')
zinnia_menu.add_sideframe_item(_('New entry'), url=url)
url = reverse('admin:zinnia_category_add')
zinnia_menu.add_sideframe_item(_('New category'), url=url)
zinnia_menu.add_break()
url = reverse('admin:zinnia_entry_changelist')
zinnia_menu.add_sideframe_item(_('Entries list'), url=url)
url = reverse('admin:zinnia_category_changelist')
zinnia_menu.add_sideframe_item(_('Categories list'), url=url)
url = reverse('admin:tagging_tag_changelist')
zinnia_menu.add_sideframe_item(_('Tags list'), url=url)
toolbar_pool.register(ZinniaToolbar)
|
<commit_before><commit_msg>Add a toolbar for Zinnia<commit_after>"""Toolbar extensions for CMS"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
class ZinniaToolbar(CMSToolbar):
def populate(self):
zinnia_menu = self.toolbar.get_or_create_menu(
'zinnia-menu', _('Zinnia'))
url = reverse('admin:zinnia_entry_add')
zinnia_menu.add_sideframe_item(_('New entry'), url=url)
url = reverse('admin:zinnia_category_add')
zinnia_menu.add_sideframe_item(_('New category'), url=url)
zinnia_menu.add_break()
url = reverse('admin:zinnia_entry_changelist')
zinnia_menu.add_sideframe_item(_('Entries list'), url=url)
url = reverse('admin:zinnia_category_changelist')
zinnia_menu.add_sideframe_item(_('Categories list'), url=url)
url = reverse('admin:tagging_tag_changelist')
zinnia_menu.add_sideframe_item(_('Tags list'), url=url)
toolbar_pool.register(ZinniaToolbar)
|
|
4d774d2707779885384dafa00be7bc0617133989
|
src/ocspdash/custom_columns.py
|
src/ocspdash/custom_columns.py
|
from sqlalchemy.types import TypeDecorator, BINARY
from sqlalchemy.dialects.postgresql import UUID as psql_UUID  # aliased so it is not shadowed by the UUID class below
import uuid
class UUID(TypeDecorator):
"""Platform-independent UUID type.
Uses Postgresql's UUID type, otherwise uses
BINARY(16).
Based on http://docs.sqlalchemy.org/en/rel_0_9/core/custom_types.html?highlight=guid#backend-agnostic-guid-type
"""
impl = BINARY
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(psql_UUID())
else:
return dialect.type_descriptor(BINARY)
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value)
else:
if not isinstance(value, uuid.UUID):
return uuid.UUID(value).bytes
else:
# hex string
return value.bytes
def process_result_value(self, value, dialect):
if value is None:
return value
else:
return uuid.UUID(bytes=value)
|
Create a custom UUID column type
|
Create a custom UUID column type
|
Python
|
mit
|
scolby33/OCSPdash,scolby33/OCSPdash,scolby33/OCSPdash
|
Create a custom UUID column type
|
from sqlalchemy.types import TypeDecorator, BINARY
from sqlalchemy.dialects.postgresql import UUID as psql_UUID  # aliased so it is not shadowed by the UUID class below
import uuid
class UUID(TypeDecorator):
"""Platform-independent UUID type.
Uses Postgresql's UUID type, otherwise uses
BINARY(16).
Based on http://docs.sqlalchemy.org/en/rel_0_9/core/custom_types.html?highlight=guid#backend-agnostic-guid-type
"""
impl = BINARY
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(psql_UUID())
else:
return dialect.type_descriptor(BINARY)
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value)
else:
if not isinstance(value, uuid.UUID):
return uuid.UUID(value).bytes
else:
# hex string
return value.bytes
def process_result_value(self, value, dialect):
if value is None:
return value
else:
return uuid.UUID(bytes=value)
|
<commit_before><commit_msg>Create a custom UUID column type<commit_after>
|
from sqlalchemy.types import TypeDecorator, BINARY
from sqlalchemy.dialects.postgresql import UUID as psql_UUID  # aliased so it is not shadowed by the UUID class below
import uuid
class UUID(TypeDecorator):
"""Platform-independent UUID type.
Uses Postgresql's UUID type, otherwise uses
BINARY(16).
Based on http://docs.sqlalchemy.org/en/rel_0_9/core/custom_types.html?highlight=guid#backend-agnostic-guid-type
"""
impl = BINARY
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(psql_UUID())
else:
return dialect.type_descriptor(BINARY)
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value)
else:
if not isinstance(value, uuid.UUID):
return uuid.UUID(value).bytes
else:
# hex string
return value.bytes
def process_result_value(self, value, dialect):
if value is None:
return value
else:
return uuid.UUID(bytes=value)
|
Create a custom UUID column typefrom sqlalchemy.types import TypeDecorator, BINARY
from sqlalchemy.dialects.postgresql import UUID as psql_UUID  # aliased so it is not shadowed by the UUID class below
import uuid
class UUID(TypeDecorator):
"""Platform-independent UUID type.
Uses Postgresql's UUID type, otherwise uses
BINARY(16).
Based on http://docs.sqlalchemy.org/en/rel_0_9/core/custom_types.html?highlight=guid#backend-agnostic-guid-type
"""
impl = BINARY
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(psql_UUID())
else:
return dialect.type_descriptor(BINARY)
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value)
else:
if not isinstance(value, uuid.UUID):
return uuid.UUID(value).bytes
else:
# hex string
return value.bytes
def process_result_value(self, value, dialect):
if value is None:
return value
else:
return uuid.UUID(bytes=value)
|
<commit_before><commit_msg>Create a custom UUID column type<commit_after>from sqlalchemy.types import TypeDecorator, BINARY
from sqlalchemy.dialects.postgresql import UUID as psql_UUID  # aliased so it is not shadowed by the UUID class below
import uuid
class UUID(TypeDecorator):
"""Platform-independent UUID type.
Uses Postgresql's UUID type, otherwise uses
BINARY(16).
Based on http://docs.sqlalchemy.org/en/rel_0_9/core/custom_types.html?highlight=guid#backend-agnostic-guid-type
"""
impl = BINARY
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(psql_UUID())
else:
return dialect.type_descriptor(BINARY)
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value)
else:
if not isinstance(value, uuid.UUID):
return uuid.UUID(value).bytes
else:
# hex string
return value.bytes
def process_result_value(self, value, dialect):
if value is None:
return value
else:
return uuid.UUID(bytes=value)
|
|
4a1ea1545c6428f3695c001ef9960ea696d20a36
|
test_utilities/src/d1_test/instance_generator/sciobj.py
|
test_utilities/src/d1_test/instance_generator/sciobj.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import re
import StringIO
import d1_common.xml
import d1_test.d1_test_case
import d1_test.instance_generator.identifier
import d1_test.instance_generator.system_metadata
def generate_reproducible(client, pid=None, option_dict=None):
"""Generate science object bytes and a random, fully populated System Metadata
object that are always the same for a given PID.
The PID can be seen as a handle through which the same science object bytes
and sysmeta can always be retrieved.
"""
option_dict = option_dict or {}
pid = pid or d1_test.instance_generator.identifier.generate_pid()
option_dict['identifier'] = pid
with d1_test.d1_test_case.reproducible_random_context(pid):
sciobj_str = generate_reproducible_sciobj_str(pid)
sysmeta_pyxb = (
d1_test.instance_generator.system_metadata.generate_from_file(
client, StringIO.StringIO(sciobj_str), option_dict
)
)
return (
pid, d1_common.xml.get_value(sysmeta_pyxb, 'seriesId'), sciobj_str,
sysmeta_pyxb
)
def generate_reproducible_sciobj_str(pid):
"""Return a science object byte string that is always the same for a given PID
"""
undecorated_pid = re.sub(r'^<.*?>', '', pid)
with d1_test.d1_test_case.reproducible_random_context(undecorated_pid):
return (
'These are the reproducible Science Object bytes for pid="{}". '
'What follows is 100 to 200 random bytes: '.
format(undecorated_pid.encode('utf-8')) + str(
bytearray(
random.getrandbits(8) for _ in range(random.randint(100, 200))
)
)
)
|
Add instance generator for complete reproducible objects
|
Add instance generator for complete reproducible objects
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Add instance generator for complete reproducible objects
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import re
import StringIO
import d1_common.xml
import d1_test.d1_test_case
import d1_test.instance_generator.identifier
import d1_test.instance_generator.system_metadata
def generate_reproducible(client, pid=None, option_dict=None):
"""Generate science object bytes and a random, fully populated System Metadata
object that are always the same for a given PID.
The PID can be seen as a handle through which the same science object bytes
and sysmeta can always be retrieved.
"""
option_dict = option_dict or {}
pid = pid or d1_test.instance_generator.identifier.generate_pid()
option_dict['identifier'] = pid
with d1_test.d1_test_case.reproducible_random_context(pid):
sciobj_str = generate_reproducible_sciobj_str(pid)
sysmeta_pyxb = (
d1_test.instance_generator.system_metadata.generate_from_file(
client, StringIO.StringIO(sciobj_str), option_dict
)
)
return (
pid, d1_common.xml.get_value(sysmeta_pyxb, 'seriesId'), sciobj_str,
sysmeta_pyxb
)
def generate_reproducible_sciobj_str(pid):
"""Return a science object byte string that is always the same for a given PID
"""
undecorated_pid = re.sub(r'^<.*?>', '', pid)
with d1_test.d1_test_case.reproducible_random_context(undecorated_pid):
return (
'These are the reproducible Science Object bytes for pid="{}". '
'What follows is 100 to 200 random bytes: '.
format(undecorated_pid.encode('utf-8')) + str(
bytearray(
random.getrandbits(8) for _ in range(random.randint(100, 200))
)
)
)
|
<commit_before><commit_msg>Add instance generator for complete reproducible objects<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import re
import StringIO
import d1_common.xml
import d1_test.d1_test_case
import d1_test.instance_generator.identifier
import d1_test.instance_generator.system_metadata
def generate_reproducible(client, pid=None, option_dict=None):
"""Generate science object bytes and a random, fully populated System Metadata
object that are always the same for a given PID.
The PID can be seen as a handle through which the same science object bytes
and sysmeta can always be retrieved.
"""
option_dict = option_dict or {}
pid = pid or d1_test.instance_generator.identifier.generate_pid()
option_dict['identifier'] = pid
with d1_test.d1_test_case.reproducible_random_context(pid):
sciobj_str = generate_reproducible_sciobj_str(pid)
sysmeta_pyxb = (
d1_test.instance_generator.system_metadata.generate_from_file(
client, StringIO.StringIO(sciobj_str), option_dict
)
)
return (
pid, d1_common.xml.get_value(sysmeta_pyxb, 'seriesId'), sciobj_str,
sysmeta_pyxb
)
def generate_reproducible_sciobj_str(pid):
"""Return a science object byte string that is always the same for a given PID
"""
undecorated_pid = re.sub(r'^<.*?>', '', pid)
with d1_test.d1_test_case.reproducible_random_context(undecorated_pid):
return (
'These are the reproducible Science Object bytes for pid="{}". '
'What follows is 100 to 200 random bytes: '.
format(undecorated_pid.encode('utf-8')) + str(
bytearray(
random.getrandbits(8) for _ in range(random.randint(100, 200))
)
)
)
|
Add instance generator for complete reproducible objects#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import re
import StringIO
import d1_common.xml
import d1_test.d1_test_case
import d1_test.instance_generator.identifier
import d1_test.instance_generator.system_metadata
def generate_reproducible(client, pid=None, option_dict=None):
"""Generate science object bytes and a random, fully populated System Metadata
object that are always the same for a given PID.
The PID can be seen as a handle through which the same science object bytes
and sysmeta can always be retrieved.
"""
option_dict = option_dict or {}
pid = pid or d1_test.instance_generator.identifier.generate_pid()
option_dict['identifier'] = pid
with d1_test.d1_test_case.reproducible_random_context(pid):
sciobj_str = generate_reproducible_sciobj_str(pid)
sysmeta_pyxb = (
d1_test.instance_generator.system_metadata.generate_from_file(
client, StringIO.StringIO(sciobj_str), option_dict
)
)
return (
pid, d1_common.xml.get_value(sysmeta_pyxb, 'seriesId'), sciobj_str,
sysmeta_pyxb
)
def generate_reproducible_sciobj_str(pid):
"""Return a science object byte string that is always the same for a given PID
"""
undecorated_pid = re.sub(r'^<.*?>', '', pid)
with d1_test.d1_test_case.reproducible_random_context(undecorated_pid):
return (
'These are the reproducible Science Object bytes for pid="{}". '
'What follows is 100 to 200 random bytes: '.
format(undecorated_pid.encode('utf-8')) + str(
bytearray(
random.getrandbits(8) for _ in range(random.randint(100, 200))
)
)
)
|
<commit_before><commit_msg>Add instance generator for complete reproducible objects<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import re
import StringIO
import d1_common.xml
import d1_test.d1_test_case
import d1_test.instance_generator.identifier
import d1_test.instance_generator.system_metadata
def generate_reproducible(client, pid=None, option_dict=None):
"""Generate science object bytes and a random, fully populated System Metadata
object that are always the same for a given PID.
The PID can be seen as a handle through which the same science object bytes
and sysmeta can always be retrieved.
"""
option_dict = option_dict or {}
pid = pid or d1_test.instance_generator.identifier.generate_pid()
option_dict['identifier'] = pid
with d1_test.d1_test_case.reproducible_random_context(pid):
sciobj_str = generate_reproducible_sciobj_str(pid)
sysmeta_pyxb = (
d1_test.instance_generator.system_metadata.generate_from_file(
client, StringIO.StringIO(sciobj_str), option_dict
)
)
return (
pid, d1_common.xml.get_value(sysmeta_pyxb, 'seriesId'), sciobj_str,
sysmeta_pyxb
)
def generate_reproducible_sciobj_str(pid):
"""Return a science object byte string that is always the same for a given PID
"""
undecorated_pid = re.sub(r'^<.*?>', '', pid)
with d1_test.d1_test_case.reproducible_random_context(undecorated_pid):
return (
'These are the reproducible Science Object bytes for pid="{}". '
'What follows is 100 to 200 random bytes: '.
format(undecorated_pid.encode('utf-8')) + str(
bytearray(
random.getrandbits(8) for _ in range(random.randint(100, 200))
)
)
)
|
|
96731d45f56a6fd2d6a11aa3e2f595c84e52cb37
|
tests/test_core_lexer.py
|
tests/test_core_lexer.py
|
# -*- coding: utf-8 -*-
import sshrc.core.lexer as lexer
import pytest
@pytest.mark.parametrize("input_, output_", (
("", ""),
(" ", ""),
(" #", ""),
("# ", ""),
(" # dsfsdfsdf sdfsdfsd", ""),
(" a", " a"),
(" a# sdfsfdf", " a"),
(" a # sdfsfsd x xxxxxxx # sdfsfd", " a")
))
def test_clean_line(input_, output_):
assert lexer.clean_line(input_) == output_
|
Add tests for cleaning line
|
Add tests for cleaning line
|
Python
|
mit
|
9seconds/concierge,9seconds/sshrc
|
Add tests for cleaning line
|
# -*- coding: utf-8 -*-
import sshrc.core.lexer as lexer
import pytest
@pytest.mark.parametrize("input_, output_", (
("", ""),
(" ", ""),
(" #", ""),
("# ", ""),
(" # dsfsdfsdf sdfsdfsd", ""),
(" a", " a"),
(" a# sdfsfdf", " a"),
(" a # sdfsfsd x xxxxxxx # sdfsfd", " a")
))
def test_clean_line(input_, output_):
assert lexer.clean_line(input_) == output_
|
<commit_before><commit_msg>Add tests for cleaning line<commit_after>
|
# -*- coding: utf-8 -*-
import sshrc.core.lexer as lexer
import pytest
@pytest.mark.parametrize("input_, output_", (
("", ""),
(" ", ""),
(" #", ""),
("# ", ""),
(" # dsfsdfsdf sdfsdfsd", ""),
(" a", " a"),
(" a# sdfsfdf", " a"),
(" a # sdfsfsd x xxxxxxx # sdfsfd", " a")
))
def test_clean_line(input_, output_):
assert lexer.clean_line(input_) == output_
|
Add tests for cleaning line# -*- coding: utf-8 -*-
import sshrc.core.lexer as lexer
import pytest
@pytest.mark.parametrize("input_, output_", (
("", ""),
(" ", ""),
(" #", ""),
("# ", ""),
(" # dsfsdfsdf sdfsdfsd", ""),
(" a", " a"),
(" a# sdfsfdf", " a"),
(" a # sdfsfsd x xxxxxxx # sdfsfd", " a")
))
def test_clean_line(input_, output_):
assert lexer.clean_line(input_) == output_
|
<commit_before><commit_msg>Add tests for cleaning line<commit_after># -*- coding: utf-8 -*-
import sshrc.core.lexer as lexer
import pytest
@pytest.mark.parametrize("input_, output_", (
("", ""),
(" ", ""),
(" #", ""),
("# ", ""),
(" # dsfsdfsdf sdfsdfsd", ""),
(" a", " a"),
(" a# sdfsfdf", " a"),
(" a # sdfsfsd x xxxxxxx # sdfsfd", " a")
))
def test_clean_line(input_, output_):
assert lexer.clean_line(input_) == output_
|
|
32cb1273be3ee0f5ec9a831287c8a5116dc07d24
|
tests/test_versioning.py
|
tests/test_versioning.py
|
# -*- coding: utf-8 -*-
#
import unittest
from nose import tools
from kitchen.versioning import version_tuple_to_string
# Note: Using nose's generator tests for this so we can't subclass
# unittest.TestCase
class TestVersionTuple(object):
ver_to_tuple = {'1': ((1,),),
'1.0': ((1, 0),),
'1.0.0': ((1, 0, 0),),
'1.0a1': ((1, 0), ('a', 1)),
'1.0rc1': ((1, 0), ('rc', 1)),
'1.0rc1.2': ((1, 0), ('rc', 1, 2)),
'1.0.dev345': ((1, 0), ('dev', 345)),
'1.0a1.dev345': ((1, 0), ('a', 1), ('dev', 345)),
'1.0a1.2.dev345': ((1, 0), ('a', 1, 2), ('dev', 345)),
}
def check_ver_tuple_to_str(self, v_tuple, v_str):
tools.ok_(version_tuple_to_string(v_tuple) == v_str)
def test_version_tuple_to_string(self):
'''Test that version_tuple_to_string outputs PEP-386 compliant strings
'''
for v_str, v_tuple in self.ver_to_tuple.items():
#tools.ok_(version_tuple_to_string(v_tuple) == v_str)
yield self.check_ver_tuple_to_str, v_tuple, v_str
|
Test for the new versioning sub package
|
Test for the new versioning sub package
|
Python
|
lgpl-2.1
|
fedora-infra/kitchen,fedora-infra/kitchen
|
Test for the new versioning sub package
|
# -*- coding: utf-8 -*-
#
import unittest
from nose import tools
from kitchen.versioning import version_tuple_to_string
# Note: Using nose's generator tests for this so we can't subclass
# unittest.TestCase
class TestVersionTuple(object):
ver_to_tuple = {'1': ((1,),),
'1.0': ((1, 0),),
'1.0.0': ((1, 0, 0),),
'1.0a1': ((1, 0), ('a', 1)),
'1.0rc1': ((1, 0), ('rc', 1)),
'1.0rc1.2': ((1, 0), ('rc', 1, 2)),
'1.0.dev345': ((1, 0), ('dev', 345)),
'1.0a1.dev345': ((1, 0), ('a', 1), ('dev', 345)),
'1.0a1.2.dev345': ((1, 0), ('a', 1, 2), ('dev', 345)),
}
def check_ver_tuple_to_str(self, v_tuple, v_str):
tools.ok_(version_tuple_to_string(v_tuple) == v_str)
def test_version_tuple_to_string(self):
'''Test that version_tuple_to_string outputs PEP-386 compliant strings
'''
for v_str, v_tuple in self.ver_to_tuple.items():
#tools.ok_(version_tuple_to_string(v_tuple) == v_str)
yield self.check_ver_tuple_to_str, v_tuple, v_str
|
<commit_before><commit_msg>Test for the new versioning sub package<commit_after>
|
# -*- coding: utf-8 -*-
#
import unittest
from nose import tools
from kitchen.versioning import version_tuple_to_string
# Note: Using nose's generator tests for this so we can't subclass
# unittest.TestCase
class TestVersionTuple(object):
ver_to_tuple = {'1': ((1,),),
'1.0': ((1, 0),),
'1.0.0': ((1, 0, 0),),
'1.0a1': ((1, 0), ('a', 1)),
'1.0rc1': ((1, 0), ('rc', 1)),
'1.0rc1.2': ((1, 0), ('rc', 1, 2)),
'1.0.dev345': ((1, 0), ('dev', 345)),
'1.0a1.dev345': ((1, 0), ('a', 1), ('dev', 345)),
'1.0a1.2.dev345': ((1, 0), ('a', 1, 2), ('dev', 345)),
}
def check_ver_tuple_to_str(self, v_tuple, v_str):
tools.ok_(version_tuple_to_string(v_tuple) == v_str)
def test_version_tuple_to_string(self):
'''Test that version_tuple_to_string outputs PEP-386 compliant strings
'''
for v_str, v_tuple in self.ver_to_tuple.items():
#tools.ok_(version_tuple_to_string(v_tuple) == v_str)
yield self.check_ver_tuple_to_str, v_tuple, v_str
|
Test for the new versioning sub package# -*- coding: utf-8 -*-
#
import unittest
from nose import tools
from kitchen.versioning import version_tuple_to_string
# Note: Using nose's generator tests for this so we can't subclass
# unittest.TestCase
class TestVersionTuple(object):
ver_to_tuple = {'1': ((1,),),
'1.0': ((1, 0),),
'1.0.0': ((1, 0, 0),),
'1.0a1': ((1, 0), ('a', 1)),
'1.0rc1': ((1, 0), ('rc', 1)),
'1.0rc1.2': ((1, 0), ('rc', 1, 2)),
'1.0.dev345': ((1, 0), ('dev', 345)),
'1.0a1.dev345': ((1, 0), ('a', 1), ('dev', 345)),
'1.0a1.2.dev345': ((1, 0), ('a', 1, 2), ('dev', 345)),
}
def check_ver_tuple_to_str(self, v_tuple, v_str):
tools.ok_(version_tuple_to_string(v_tuple) == v_str)
def test_version_tuple_to_string(self):
'''Test that version_tuple_to_string outputs PEP-386 compliant strings
'''
for v_str, v_tuple in self.ver_to_tuple.items():
#tools.ok_(version_tuple_to_string(v_tuple) == v_str)
yield self.check_ver_tuple_to_str, v_tuple, v_str
|
<commit_before><commit_msg>Test for the new versioning sub package<commit_after># -*- coding: utf-8 -*-
#
import unittest
from nose import tools
from kitchen.versioning import version_tuple_to_string
# Note: Using nose's generator tests for this so we can't subclass
# unittest.TestCase
class TestVersionTuple(object):
ver_to_tuple = {'1': ((1,),),
'1.0': ((1, 0),),
'1.0.0': ((1, 0, 0),),
'1.0a1': ((1, 0), ('a', 1)),
'1.0rc1': ((1, 0), ('rc', 1)),
'1.0rc1.2': ((1, 0), ('rc', 1, 2)),
'1.0.dev345': ((1, 0), ('dev', 345)),
'1.0a1.dev345': ((1, 0), ('a', 1), ('dev', 345)),
'1.0a1.2.dev345': ((1, 0), ('a', 1, 2), ('dev', 345)),
}
def check_ver_tuple_to_str(self, v_tuple, v_str):
tools.ok_(version_tuple_to_string(v_tuple) == v_str)
def test_version_tuple_to_string(self):
'''Test that version_tuple_to_string outputs PEP-386 compliant strings
'''
for v_str, v_tuple in self.ver_to_tuple.items():
#tools.ok_(version_tuple_to_string(v_tuple) == v_str)
yield self.check_ver_tuple_to_str, v_tuple, v_str
|
|
9aafdd8f00b96105e86d23c1936da620d0540cbe
|
python/twitter_status_ids.py
|
python/twitter_status_ids.py
|
import sys
from datetime import datetime
# use like:
# twilight stream --filter "locations=-180,-90,180,90" | grep --line-buffered -v '^{"delete":' | jq --unbuffered -r .id_str | gstdbuf -o0 head -1000 | python -u /Users/chbrown/github/sandbox/python/twitter_status_ids.py
TWEPOCH = 1288834974657
while True:
line = sys.stdin.readline()
if not line: break # EOF
status_id = int(line)
ticks = (status_id >> 22) + TWEPOCH
print ticks
# print datetime.fromtimestamp(ticks / 1000.0)
# print out only the milliseconds part
# print ticks % 1000
# sys.stdout.write('%d\n' % (ticks % 1000))
# sys.stdout.flush()
|
Add python script that converts Twitter IDs to timestamps
|
Add python script that converts Twitter IDs to timestamps
|
Python
|
mit
|
chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox,chbrown/sandbox
|
Add python script that converts Twitter IDs to timestamps
|
import sys
from datetime import datetime
# use like:
# twilight stream --filter "locations=-180,-90,180,90" | grep --line-buffered -v '^{"delete":' | jq --unbuffered -r .id_str | gstdbuf -o0 head -1000 | python -u /Users/chbrown/github/sandbox/python/twitter_status_ids.py
TWEPOCH = 1288834974657
while True:
line = sys.stdin.readline()
if not line: break # EOF
status_id = int(line)
ticks = (status_id >> 22) + TWEPOCH
print ticks
# print datetime.fromtimestamp(ticks / 1000.0)
# print out only the milliseconds part
# print ticks % 1000
# sys.stdout.write('%d\n' % (ticks % 1000))
# sys.stdout.flush()
|
<commit_before><commit_msg>Add python script that converts Twitter IDs to timestamps<commit_after>
|
import sys
from datetime import datetime
# use like:
# twilight stream --filter "locations=-180,-90,180,90" | grep --line-buffered -v '^{"delete":' | jq --unbuffered -r .id_str | gstdbuf -o0 head -1000 | python -u /Users/chbrown/github/sandbox/python/twitter_status_ids.py
TWEPOCH = 1288834974657
while True:
line = sys.stdin.readline()
if not line: break # EOF
status_id = int(line)
ticks = (status_id >> 22) + TWEPOCH
print ticks
# print datetime.fromtimestamp(ticks / 1000.0)
# print out only the milliseconds part
# print ticks % 1000
# sys.stdout.write('%d\n' % (ticks % 1000))
# sys.stdout.flush()
|
Add python script that converts Twitter IDs to timestampsimport sys
from datetime import datetime
# use like:
# twilight stream --filter "locations=-180,-90,180,90" | grep --line-buffered -v '^{"delete":' | jq --unbuffered -r .id_str | gstdbuf -o0 head -1000 | python -u /Users/chbrown/github/sandbox/python/twitter_status_ids.py
TWEPOCH = 1288834974657
while True:
line = sys.stdin.readline()
if not line: break # EOF
status_id = int(line)
ticks = (status_id >> 22) + TWEPOCH
print ticks
# print datetime.fromtimestamp(ticks / 1000.0)
# print out only the milliseconds part
# print ticks % 1000
# sys.stdout.write('%d\n' % (ticks % 1000))
# sys.stdout.flush()
|
<commit_before><commit_msg>Add python script that converts Twitter IDs to timestamps<commit_after>import sys
from datetime import datetime
# use like:
# twilight stream --filter "locations=-180,-90,180,90" | grep --line-buffered -v '^{"delete":' | jq --unbuffered -r .id_str | gstdbuf -o0 head -1000 | python -u /Users/chbrown/github/sandbox/python/twitter_status_ids.py
TWEPOCH = 1288834974657
while True:
line = sys.stdin.readline()
if not line: break # EOF
status_id = int(line)
ticks = (status_id >> 22) + TWEPOCH
print ticks
# print datetime.fromtimestamp(ticks / 1000.0)
# print out only the milliseconds part
# print ticks % 1000
# sys.stdout.write('%d\n' % (ticks % 1000))
# sys.stdout.flush()
|
|
594b0e28d840de323fd98a5a2f3acd543d94fec2
|
integration-test/192-shield-text-ref.py
|
integration-test/192-shield-text-ref.py
|
# US 101, "James Lick Freeway"
# http://www.openstreetmap.org/way/27183379
# http://www.openstreetmap.org/relation/108619
assert_has_feature(
16, 10484, 25334, 'roads',
{ 'kind': 'highway', 'network': 'US:US', 'id': 27183379,
'shield_text': '101' })
# I-77, I-81, US-11 & US-52 all in one road West Virginia.
#
# http://www.openstreetmap.org/way/51388984
# http://www.openstreetmap.org/relation/2309416
# http://www.openstreetmap.org/relation/2301037
# http://www.openstreetmap.org/relation/2297359
# http://www.openstreetmap.org/relation/1027748
assert_has_feature(
16, 18022, 25522, 'roads',
{ 'kind': 'highway', 'network': 'US:I', 'id': 51388984,
'shield_text': '77' })
|
Add test case for networks and network sorting.
|
Add test case for networks and network sorting.
|
Python
|
mit
|
mapzen/vector-datasource,mapzen/vector-datasource,mapzen/vector-datasource
|
Add test case for networks and network sorting.
|
# US 101, "James Lick Freeway"
# http://www.openstreetmap.org/way/27183379
# http://www.openstreetmap.org/relation/108619
assert_has_feature(
16, 10484, 25334, 'roads',
{ 'kind': 'highway', 'network': 'US:US', 'id': 27183379,
'shield_text': '101' })
# I-77, I-81, US-11 & US-52 all in one road West Virginia.
#
# http://www.openstreetmap.org/way/51388984
# http://www.openstreetmap.org/relation/2309416
# http://www.openstreetmap.org/relation/2301037
# http://www.openstreetmap.org/relation/2297359
# http://www.openstreetmap.org/relation/1027748
assert_has_feature(
16, 18022, 25522, 'roads',
{ 'kind': 'highway', 'network': 'US:I', 'id': 51388984,
'shield_text': '77' })
|
<commit_before><commit_msg>Add test case for networks and network sorting.<commit_after>
|
# US 101, "James Lick Freeway"
# http://www.openstreetmap.org/way/27183379
# http://www.openstreetmap.org/relation/108619
assert_has_feature(
16, 10484, 25334, 'roads',
{ 'kind': 'highway', 'network': 'US:US', 'id': 27183379,
'shield_text': '101' })
# I-77, I-81, US-11 & US-52 all in one road West Virginia.
#
# http://www.openstreetmap.org/way/51388984
# http://www.openstreetmap.org/relation/2309416
# http://www.openstreetmap.org/relation/2301037
# http://www.openstreetmap.org/relation/2297359
# http://www.openstreetmap.org/relation/1027748
assert_has_feature(
16, 18022, 25522, 'roads',
{ 'kind': 'highway', 'network': 'US:I', 'id': 51388984,
'shield_text': '77' })
|
Add test case for networks and network sorting.# US 101, "James Lick Freeway"
# http://www.openstreetmap.org/way/27183379
# http://www.openstreetmap.org/relation/108619
assert_has_feature(
16, 10484, 25334, 'roads',
{ 'kind': 'highway', 'network': 'US:US', 'id': 27183379,
'shield_text': '101' })
# I-77, I-81, US-11 & US-52 all in one road West Virginia.
#
# http://www.openstreetmap.org/way/51388984
# http://www.openstreetmap.org/relation/2309416
# http://www.openstreetmap.org/relation/2301037
# http://www.openstreetmap.org/relation/2297359
# http://www.openstreetmap.org/relation/1027748
assert_has_feature(
16, 18022, 25522, 'roads',
{ 'kind': 'highway', 'network': 'US:I', 'id': 51388984,
'shield_text': '77' })
|
<commit_before><commit_msg>Add test case for networks and network sorting.<commit_after># US 101, "James Lick Freeway"
# http://www.openstreetmap.org/way/27183379
# http://www.openstreetmap.org/relation/108619
assert_has_feature(
16, 10484, 25334, 'roads',
{ 'kind': 'highway', 'network': 'US:US', 'id': 27183379,
'shield_text': '101' })
# I-77, I-81, US-11 & US-52 all in one road West Virginia.
#
# http://www.openstreetmap.org/way/51388984
# http://www.openstreetmap.org/relation/2309416
# http://www.openstreetmap.org/relation/2301037
# http://www.openstreetmap.org/relation/2297359
# http://www.openstreetmap.org/relation/1027748
assert_has_feature(
16, 18022, 25522, 'roads',
{ 'kind': 'highway', 'network': 'US:I', 'id': 51388984,
'shield_text': '77' })
|
|
4bd22be3c6b0d2a63fdf6d7a393d790025d16515
|
examples/unsuported_semantics.py
|
examples/unsuported_semantics.py
|
# Note: Display the list of unsuported semantics
from operator import itemgetter
from triton import *
unsuportedSemantics = dict()
def cbefore(instruction):
if len(instruction.symbolicElements) == 0:
mnemonic = opcodeToString(instruction.opcode)
if mnemonic in unsuportedSemantics:
unsuportedSemantics[mnemonic] += 1
else:
unsuportedSemantics.update({mnemonic: 1})
return
def cfini():
l = unsuportedSemantics.items()
l.sort(key=itemgetter(1), reverse=True)
for i in l:
print '%s: %d' %(i[0].lower(), i[1])
return
if __name__ == '__main__':
startAnalysisFromSymbol('main')
addCallback(cbefore, IDREF.CALLBACK.BEFORE)
addCallback(cfini, IDREF.CALLBACK.FINI)
runProgram()
|
Add an example which lists the unsuported semantics - useful
|
Add an example which lists the unsuported semantics - useful
|
Python
|
apache-2.0
|
JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton
|
Add an example which lists the unsuported semantics - useful
|
# Note: Display the list of unsuported semantics
from operator import itemgetter
from triton import *
unsuportedSemantics = dict()
def cbefore(instruction):
if len(instruction.symbolicElements) == 0:
mnemonic = opcodeToString(instruction.opcode)
if mnemonic in unsuportedSemantics:
unsuportedSemantics[mnemonic] += 1
else:
unsuportedSemantics.update({mnemonic: 1})
return
def cfini():
l = unsuportedSemantics.items()
l.sort(key=itemgetter(1), reverse=True)
for i in l:
print '%s: %d' %(i[0].lower(), i[1])
return
if __name__ == '__main__':
startAnalysisFromSymbol('main')
addCallback(cbefore, IDREF.CALLBACK.BEFORE)
addCallback(cfini, IDREF.CALLBACK.FINI)
runProgram()
|
<commit_before><commit_msg>Add an example which lists the unsuported semantics - useful<commit_after>
|
# Note: Display the list of unsuported semantics
from operator import itemgetter
from triton import *
unsuportedSemantics = dict()
def cbefore(instruction):
if len(instruction.symbolicElements) == 0:
mnemonic = opcodeToString(instruction.opcode)
if mnemonic in unsuportedSemantics:
unsuportedSemantics[mnemonic] += 1
else:
unsuportedSemantics.update({mnemonic: 1})
return
def cfini():
l = unsuportedSemantics.items()
l.sort(key=itemgetter(1), reverse=True)
for i in l:
print '%s: %d' %(i[0].lower(), i[1])
return
if __name__ == '__main__':
startAnalysisFromSymbol('main')
addCallback(cbefore, IDREF.CALLBACK.BEFORE)
addCallback(cfini, IDREF.CALLBACK.FINI)
runProgram()
|
Add an example which lists the unsuported semantics - useful
# Note: Display the list of unsuported semantics
from operator import itemgetter
from triton import *
unsuportedSemantics = dict()
def cbefore(instruction):
if len(instruction.symbolicElements) == 0:
mnemonic = opcodeToString(instruction.opcode)
if mnemonic in unsuportedSemantics:
unsuportedSemantics[mnemonic] += 1
else:
unsuportedSemantics.update({mnemonic: 1})
return
def cfini():
l = unsuportedSemantics.items()
l.sort(key=itemgetter(1), reverse=True)
for i in l:
print '%s: %d' %(i[0].lower(), i[1])
return
if __name__ == '__main__':
startAnalysisFromSymbol('main')
addCallback(cbefore, IDREF.CALLBACK.BEFORE)
addCallback(cfini, IDREF.CALLBACK.FINI)
runProgram()
|
<commit_before><commit_msg>Add an example which lists the unsuported semantics - useful<commit_after>
# Note: Display the list of unsuported semantics
from operator import itemgetter
from triton import *
unsuportedSemantics = dict()
def cbefore(instruction):
if len(instruction.symbolicElements) == 0:
mnemonic = opcodeToString(instruction.opcode)
if mnemonic in unsuportedSemantics:
unsuportedSemantics[mnemonic] += 1
else:
unsuportedSemantics.update({mnemonic: 1})
return
def cfini():
l = unsuportedSemantics.items()
l.sort(key=itemgetter(1), reverse=True)
for i in l:
print '%s: %d' %(i[0].lower(), i[1])
return
if __name__ == '__main__':
startAnalysisFromSymbol('main')
addCallback(cbefore, IDREF.CALLBACK.BEFORE)
addCallback(cfini, IDREF.CALLBACK.FINI)
runProgram()
|
|
14c48e8d8e44b0f3ff4f0a9d6dbeab9ae38201ab
|
test_bug.py
|
test_bug.py
|
code = '''from __future__ import unicode_literals
from webtest import Upload
def test_create_break(testapp, session):
sid = session['id']
resp = testapp.post(
'/sessions/{}/files'.format(sid),
dict(
file=Upload('foobar.py', b'print 123'),
),
status=201,
)
file_id = resp.json['file']['id']
resp = testapp.post(
'/sessions/{}/breaks'.format(sid),
dict(
lineno=123,
file_id=file_id,
# TODO: break types?
# TODO: other info
),
status=201,
)
assert resp.json['id'].startswith('BK')
assert resp.json['file']['id'] == file_id
assert resp.json['lineno'] == 123
'''
import py
from py._code.source import getstatementrange_ast
def test_getstatementrange_ast():
source = py.code.Source(code)
_, _, end = getstatementrange_ast(19, source)
assert end == 31
|
Add test to demonstrate the bug
|
Add test to demonstrate the bug
|
Python
|
mit
|
victorlin/py.test-source-deindenting-bug-demo
|
Add test to demonstrate the bug
|
code = '''from __future__ import unicode_literals
from webtest import Upload
def test_create_break(testapp, session):
sid = session['id']
resp = testapp.post(
'/sessions/{}/files'.format(sid),
dict(
file=Upload('foobar.py', b'print 123'),
),
status=201,
)
file_id = resp.json['file']['id']
resp = testapp.post(
'/sessions/{}/breaks'.format(sid),
dict(
lineno=123,
file_id=file_id,
# TODO: break types?
# TODO: other info
),
status=201,
)
assert resp.json['id'].startswith('BK')
assert resp.json['file']['id'] == file_id
assert resp.json['lineno'] == 123
'''
import py
from py._code.source import getstatementrange_ast
def test_getstatementrange_ast():
source = py.code.Source(code)
_, _, end = getstatementrange_ast(19, source)
assert end == 31
|
<commit_before><commit_msg>Add test to demonstrate the bug<commit_after>
|
code = '''from __future__ import unicode_literals
from webtest import Upload
def test_create_break(testapp, session):
sid = session['id']
resp = testapp.post(
'/sessions/{}/files'.format(sid),
dict(
file=Upload('foobar.py', b'print 123'),
),
status=201,
)
file_id = resp.json['file']['id']
resp = testapp.post(
'/sessions/{}/breaks'.format(sid),
dict(
lineno=123,
file_id=file_id,
# TODO: break types?
# TODO: other info
),
status=201,
)
assert resp.json['id'].startswith('BK')
assert resp.json['file']['id'] == file_id
assert resp.json['lineno'] == 123
'''
import py
from py._code.source import getstatementrange_ast
def test_getstatementrange_ast():
source = py.code.Source(code)
_, _, end = getstatementrange_ast(19, source)
assert end == 31
|
Add test to demonstrate the bugcode = '''from __future__ import unicode_literals
from webtest import Upload
def test_create_break(testapp, session):
sid = session['id']
resp = testapp.post(
'/sessions/{}/files'.format(sid),
dict(
file=Upload('foobar.py', b'print 123'),
),
status=201,
)
file_id = resp.json['file']['id']
resp = testapp.post(
'/sessions/{}/breaks'.format(sid),
dict(
lineno=123,
file_id=file_id,
# TODO: break types?
# TODO: other info
),
status=201,
)
assert resp.json['id'].startswith('BK')
assert resp.json['file']['id'] == file_id
assert resp.json['lineno'] == 123
'''
import py
from py._code.source import getstatementrange_ast
def test_getstatementrange_ast():
source = py.code.Source(code)
_, _, end = getstatementrange_ast(19, source)
assert end == 31
|
<commit_before><commit_msg>Add test to demonstrate the bug<commit_after>code = '''from __future__ import unicode_literals
from webtest import Upload
def test_create_break(testapp, session):
sid = session['id']
resp = testapp.post(
'/sessions/{}/files'.format(sid),
dict(
file=Upload('foobar.py', b'print 123'),
),
status=201,
)
file_id = resp.json['file']['id']
resp = testapp.post(
'/sessions/{}/breaks'.format(sid),
dict(
lineno=123,
file_id=file_id,
# TODO: break types?
# TODO: other info
),
status=201,
)
assert resp.json['id'].startswith('BK')
assert resp.json['file']['id'] == file_id
assert resp.json['lineno'] == 123
'''
import py
from py._code.source import getstatementrange_ast
def test_getstatementrange_ast():
source = py.code.Source(code)
_, _, end = getstatementrange_ast(19, source)
assert end == 31
|
|
b0451e622be57fade28ed431c4f9031093db3777
|
tests/test_cellom2tif.py
|
tests/test_cellom2tif.py
|
from cellom2tif import cellom2tif
def test_start():
cellom2tif.start()
assert cellom2tif.VM_STARTED
def test_done():
cellom2tif.done()
assert cellom2tif.VM_KILLED
|
Test individual functions in cellom2tif.py
|
Test individual functions in cellom2tif.py
|
Python
|
bsd-3-clause
|
jni/cellom2tif
|
Test individual functions in cellom2tif.py
|
from cellom2tif import cellom2tif
def test_start():
cellom2tif.start()
assert cellom2tif.VM_STARTED
def test_done():
cellom2tif.done()
assert cellom2tif.VM_KILLED
|
<commit_before><commit_msg>Test individual functions in cellom2tif.py<commit_after>
|
from cellom2tif import cellom2tif
def test_start():
cellom2tif.start()
assert cellom2tif.VM_STARTED
def test_done():
cellom2tif.done()
assert cellom2tif.VM_KILLED
|
Test individual functions in cellom2tif.pyfrom cellom2tif import cellom2tif
def test_start():
cellom2tif.start()
assert cellom2tif.VM_STARTED
def test_done():
cellom2tif.done()
assert cellom2tif.VM_KILLED
|
<commit_before><commit_msg>Test individual functions in cellom2tif.py<commit_after>from cellom2tif import cellom2tif
def test_start():
cellom2tif.start()
assert cellom2tif.VM_STARTED
def test_done():
cellom2tif.done()
assert cellom2tif.VM_KILLED
|
|
014e9eaa4ccc29d025f3910870789ad23d454ae3
|
tools/aa_bench_to_csv.py
|
tools/aa_bench_to_csv.py
|
#!/usr/bin/env python3
import sys
import os
import argparse
import json
def read_stat_file(path):
with open(path, 'r') as f:
json_data = f.read()
parsed = json.loads(json_data)
return parsed
def main():
parser = argparse.ArgumentParser(description = 'Converts a sweep file to CSV.')
parser.add_argument('--stat',
help = 'Stat files',
type = str)
parser.add_argument('--output',
help = 'Output path',
type = str)
parser.add_argument('--subtract-blit-overhead',
help = 'Subtract the time for blitting results (equivalent to "none" method)',
action = 'store_true')
args = parser.parse_args()
if args.stat is None:
sys.exit(1)
stats = read_stat_file(args.stat)
delta = 0.0
if args.subtract_blit_overhead:
for run in stats['runs']:
if run['method'] == 'none':
delta = -run['avg']
break
lines = [','.join(['Method', 'Average time ' + stats['runs'][0]['gpu']]) + '\n']
for run in stats['runs']:
method = run['method']
line = [method, max(run['avg'] + delta, 0.0)]
line = [str(x) for x in line]
lines.append(','.join(line) + '\n')
if args.output is not None:
with open(args.output, 'w') as f:
f.writelines(lines)
else:
for line in lines:
print(line)
if __name__ == '__main__':
main()
|
Add a script to convert AA bench runs to CSV.
|
Add a script to convert AA bench runs to CSV.
|
Python
|
mit
|
Themaister/Granite,Themaister/Granite,Themaister/Granite,Themaister/Granite,Themaister/Granite,Themaister/Granite
|
Add a script to convert AA bench runs to CSV.
|
#!/usr/bin/env python3
import sys
import os
import argparse
import json
def read_stat_file(path):
with open(path, 'r') as f:
json_data = f.read()
parsed = json.loads(json_data)
return parsed
def main():
parser = argparse.ArgumentParser(description = 'Converts a sweep file to CSV.')
parser.add_argument('--stat',
help = 'Stat files',
type = str)
parser.add_argument('--output',
help = 'Output path',
type = str)
parser.add_argument('--subtract-blit-overhead',
help = 'Subtract the time for blitting results (equivalent to "none" method)',
action = 'store_true')
args = parser.parse_args()
if args.stat is None:
sys.exit(1)
stats = read_stat_file(args.stat)
delta = 0.0
if args.subtract_blit_overhead:
for run in stats['runs']:
if run['method'] == 'none':
delta = -run['avg']
break
lines = [','.join(['Method', 'Average time ' + stats['runs'][0]['gpu']]) + '\n']
for run in stats['runs']:
method = run['method']
line = [method, max(run['avg'] + delta, 0.0)]
line = [str(x) for x in line]
lines.append(','.join(line) + '\n')
if args.output is not None:
with open(args.output, 'w') as f:
f.writelines(lines)
else:
for line in lines:
print(line)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to convert AA bench runs to CSV.<commit_after>
|
#!/usr/bin/env python3
import sys
import os
import argparse
import json
def read_stat_file(path):
with open(path, 'r') as f:
json_data = f.read()
parsed = json.loads(json_data)
return parsed
def main():
parser = argparse.ArgumentParser(description = 'Converts a sweep file to CSV.')
parser.add_argument('--stat',
help = 'Stat files',
type = str)
parser.add_argument('--output',
help = 'Output path',
type = str)
parser.add_argument('--subtract-blit-overhead',
help = 'Subtract the time for blitting results (equivalent to "none" method)',
action = 'store_true')
args = parser.parse_args()
if args.stat is None:
sys.exit(1)
stats = read_stat_file(args.stat)
delta = 0.0
if args.subtract_blit_overhead:
for run in stats['runs']:
if run['method'] == 'none':
delta = -run['avg']
break
lines = [','.join(['Method', 'Average time ' + stats['runs'][0]['gpu']]) + '\n']
for run in stats['runs']:
method = run['method']
line = [method, max(run['avg'] + delta, 0.0)]
line = [str(x) for x in line]
lines.append(','.join(line) + '\n')
if args.output is not None:
with open(args.output, 'w') as f:
f.writelines(lines)
else:
for line in lines:
print(line)
if __name__ == '__main__':
main()
|
Add a script to convert AA bench runs to CSV.
#!/usr/bin/env python3
import sys
import os
import argparse
import json
def read_stat_file(path):
with open(path, 'r') as f:
json_data = f.read()
parsed = json.loads(json_data)
return parsed
def main():
parser = argparse.ArgumentParser(description = 'Converts a sweep file to CSV.')
parser.add_argument('--stat',
help = 'Stat files',
type = str)
parser.add_argument('--output',
help = 'Output path',
type = str)
parser.add_argument('--subtract-blit-overhead',
help = 'Subtract the time for blitting results (equivalent to "none" method)',
action = 'store_true')
args = parser.parse_args()
if args.stat is None:
sys.exit(1)
stats = read_stat_file(args.stat)
delta = 0.0
if args.subtract_blit_overhead:
for run in stats['runs']:
if run['method'] == 'none':
delta = -run['avg']
break
lines = [','.join(['Method', 'Average time ' + stats['runs'][0]['gpu']]) + '\n']
for run in stats['runs']:
method = run['method']
line = [method, max(run['avg'] + delta, 0.0)]
line = [str(x) for x in line]
lines.append(','.join(line) + '\n')
if args.output is not None:
with open(args.output, 'w') as f:
f.writelines(lines)
else:
for line in lines:
print(line)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to convert AA bench runs to CSV.<commit_after>
#!/usr/bin/env python3
import sys
import os
import argparse
import json
def read_stat_file(path):
with open(path, 'r') as f:
json_data = f.read()
parsed = json.loads(json_data)
return parsed
def main():
parser = argparse.ArgumentParser(description = 'Converts a sweep file to CSV.')
parser.add_argument('--stat',
help = 'Stat files',
type = str)
parser.add_argument('--output',
help = 'Output path',
type = str)
parser.add_argument('--subtract-blit-overhead',
help = 'Subtract the time for blitting results (equivalent to "none" method)',
action = 'store_true')
args = parser.parse_args()
if args.stat is None:
sys.exit(1)
stats = read_stat_file(args.stat)
delta = 0.0
if args.subtract_blit_overhead:
for run in stats['runs']:
if run['method'] == 'none':
delta = -run['avg']
break
lines = [','.join(['Method', 'Average time ' + stats['runs'][0]['gpu']]) + '\n']
for run in stats['runs']:
method = run['method']
line = [method, max(run['avg'] + delta, 0.0)]
line = [str(x) for x in line]
lines.append(','.join(line) + '\n')
if args.output is not None:
with open(args.output, 'w') as f:
f.writelines(lines)
else:
for line in lines:
print(line)
if __name__ == '__main__':
main()
|
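For reference, the aa_bench_to_csv.py sample above expects a JSON stats file with a top-level 'runs' list whose entries carry 'method', 'avg', and 'gpu' keys. The sketch below is not part of the dataset record; the file name, method names, and timings are made-up placeholders used only to illustrate the expected input shape and the CSV the script would emit.
# Hypothetical stats file for tools/aa_bench_to_csv.py; all values are illustrative.
import json
stats = {
    'runs': [
        {'method': 'none', 'avg': 0.125, 'gpu': 'ExampleGPU'},
        {'method': 'fxaa', 'avg': 0.375, 'gpu': 'ExampleGPU'},
        {'method': 'smaa', 'avg': 0.5,   'gpu': 'ExampleGPU'},
    ]
}
with open('aa_bench_stats.json', 'w') as f:
    json.dump(stats, f)
# Invoking the script as
#   python3 tools/aa_bench_to_csv.py --stat aa_bench_stats.json --output aa_bench.csv --subtract-blit-overhead
# would then write roughly:
#   Method,Average time ExampleGPU
#   none,0.0
#   fxaa,0.25
#   smaa,0.375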