| column | type | min length | max length |
|---|---|---|---|
| commit | string | 40 | 40 |
| old_file | string | 4 | 118 |
| new_file | string | 4 | 118 |
| old_contents | string | 0 | 2.94k |
| new_contents | string | 1 | 4.43k |
| subject | string | 15 | 444 |
| message | string | 16 | 3.45k |
| lang | string (1 class) | n/a | n/a |
| license | string (13 classes) | n/a | n/a |
| repos | string | 5 | 43.2k |
| prompt | string | 17 | 4.58k |
| response | string | 1 | 4.43k |
| prompt_tagged | string | 58 | 4.62k |
| response_tagged | string | 1 | 4.43k |
| text | string | 132 | 7.29k |
| text_tagged | string | 173 | 7.33k |
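The schema above suggests each row pairs a git commit (hash, file paths, before/after contents, subject, message, language, license, repos) with prompt/response strings derived from it. As a minimal sketch of working with such a dataset, the snippet below loads it with the Hugging Face `datasets` library and prints a few fields of the first row; the repo id `user/commit-code-pairs` and the `train` split are hypothetical placeholders, not the dataset's actual location.

```python
# Minimal sketch, assuming the dataset is published on the Hugging Face Hub.
# "user/commit-code-pairs" and the "train" split are hypothetical placeholders.
from datasets import load_dataset

ds = load_dataset("user/commit-code-pairs", split="train")

row = ds[0]
print(row["commit"], row["old_file"], "->", row["new_file"])  # commit hash and file path
print(row["subject"])                                          # one-line commit subject
print(row["text_tagged"][:200])                                # start of the tagged training text
```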
986e1283eacdca354ac56e590f6613b5f323758a
|
spec/integrations_spec.py
|
spec/integrations_spec.py
|
from expects import expect, be
from doublex import Spy
from doublex_expects import have_been_called
from pysellus import integrations
from pysellus.integrations import on_failure
with description('the integrations module'):
with context('exposes an `on_failure` decorator which'):
with before.each:
integrations.integrations = {}
with after.each:
integrations.integrations = {}
with it('returns the decorated function as is'):
decorated_function = Spy().decorated_function
expect(on_failure('some_integration_name')(decorated_function)).to(be(decorated_function))
with it('doesn\'t call the decorated function'):
decorated_function = Spy().decorated_function
on_failure('some_integration_name')(decorated_function)
expect(decorated_function).to_not(have_been_called)
|
Add two basic tests for `on_failure`.
|
Add two basic tests for `on_failure`.
|
Python
|
mit
|
angelsanz/pysellus,Pysellus/pysellus,cgvarela/pysellus,ergl/pysellus
|
Add two basic tests for `on_failure`.
|
from expects import expect, be
from doublex import Spy
from doublex_expects import have_been_called
from pysellus import integrations
from pysellus.integrations import on_failure
with description('the integrations module'):
with context('exposes an `on_failure` decorator which'):
with before.each:
integrations.integrations = {}
with after.each:
integrations.integrations = {}
with it('returns the decorated function as is'):
decorated_function = Spy().decorated_function
expect(on_failure('some_integration_name')(decorated_function)).to(be(decorated_function))
with it('doesn\'t call the decorated function'):
decorated_function = Spy().decorated_function
on_failure('some_integration_name')(decorated_function)
expect(decorated_function).to_not(have_been_called)
|
<commit_before><commit_msg>Add two basic tests for `on_failure`.<commit_after>
|
from expects import expect, be
from doublex import Spy
from doublex_expects import have_been_called
from pysellus import integrations
from pysellus.integrations import on_failure
with description('the integrations module'):
with context('exposes an `on_failure` decorator which'):
with before.each:
integrations.integrations = {}
with after.each:
integrations.integrations = {}
with it('returns the decorated function as is'):
decorated_function = Spy().decorated_function
expect(on_failure('some_integration_name')(decorated_function)).to(be(decorated_function))
with it('doesn\'t call the decorated function'):
decorated_function = Spy().decorated_function
on_failure('some_integration_name')(decorated_function)
expect(decorated_function).to_not(have_been_called)
|
Add two basic tests for `on_failure`.from expects import expect, be
from doublex import Spy
from doublex_expects import have_been_called
from pysellus import integrations
from pysellus.integrations import on_failure
with description('the integrations module'):
with context('exposes an `on_failure` decorator which'):
with before.each:
integrations.integrations = {}
with after.each:
integrations.integrations = {}
with it('returns the decorated function as is'):
decorated_function = Spy().decorated_function
expect(on_failure('some_integration_name')(decorated_function)).to(be(decorated_function))
with it('doesn\'t call the decorated function'):
decorated_function = Spy().decorated_function
on_failure('some_integration_name')(decorated_function)
expect(decorated_function).to_not(have_been_called)
|
<commit_before><commit_msg>Add two basic tests for `on_failure`.<commit_after>from expects import expect, be
from doublex import Spy
from doublex_expects import have_been_called
from pysellus import integrations
from pysellus.integrations import on_failure
with description('the integrations module'):
with context('exposes an `on_failure` decorator which'):
with before.each:
integrations.integrations = {}
with after.each:
integrations.integrations = {}
with it('returns the decorated function as is'):
decorated_function = Spy().decorated_function
expect(on_failure('some_integration_name')(decorated_function)).to(be(decorated_function))
with it('doesn\'t call the decorated function'):
decorated_function = Spy().decorated_function
on_failure('some_integration_name')(decorated_function)
expect(decorated_function).to_not(have_been_called)
|
|
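Judging from the first row above, the derived columns look like simple concatenations of the base columns: `prompt` equals `message`, `response` equals `new_contents`, `prompt_tagged` wraps the before-contents and message in `<commit_before>`/`<commit_msg>`/`<commit_after>` sentinels, and `text`/`text_tagged` append the response with no separator. Below is a small sketch of that apparent composition, inferred from the rows in this dump rather than from any official documentation.

```python
# Apparent composition of the derived columns, inferred from the rows in this dump.
def derive_fields(old_contents: str, message: str, new_contents: str) -> dict:
    prompt_tagged = f"<commit_before>{old_contents}<commit_msg>{message}<commit_after>"
    return {
        "prompt": message,                          # plain prompt is just the commit message
        "response": new_contents,                   # response is the post-commit file contents
        "prompt_tagged": prompt_tagged,
        "response_tagged": new_contents,
        "text": message + new_contents,             # concatenated without a separator
        "text_tagged": prompt_tagged + new_contents,
    }
```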
d6019cc60cf90d57a4804d8942a8fd1ccc7431f2
|
openfisca_senegal/survey_scenarios.py
|
openfisca_senegal/survey_scenarios.py
|
# -*- coding: utf-8 -*-
from openfisca_senegal import CountryTaxBenefitSystem as SenegalTaxBenefitSystem
from openfisca_survey_manager.scenarios import AbstractSurveyScenario
class SenegalSurveyScenario(AbstractSurveyScenario):
id_variable_by_entity_key = dict(
famille = 'id_famille',
)
role_variable_by_entity_key = dict(
famille = 'role_famille',
)
def __init__(self, input_data_frame = None, tax_benefit_system = None,
reference_tax_benefit_system = None, year = None):
super(SenegalSurveyScenario, self).__init__()
assert input_data_frame is not None
assert year is not None
self.year = year
if tax_benefit_system is None:
tax_benefit_system = SenegalTaxBenefitSystem()
self.set_tax_benefit_systems(
tax_benefit_system = tax_benefit_system,
reference_tax_benefit_system = reference_tax_benefit_system
)
self.used_as_input_variables = list(
set(tax_benefit_system.column_by_name.keys()).intersection(
set(input_data_frame.columns)
))
self.init_from_data_frame(input_data_frame = input_data_frame)
self.new_simulation()
if reference_tax_benefit_system is not None:
self.new_simulation(reference = True)
|
Add SurveyScenario adapted to SenegalTaxBenFitSystem
|
Add SurveyScenario adapted to SenegalTaxBenFitSystem
|
Python
|
agpl-3.0
|
openfisca/senegal
|
Add SurveyScenario adapted to SenegalTaxBenFitSystem
|
# -*- coding: utf-8 -*-
from openfisca_senegal import CountryTaxBenefitSystem as SenegalTaxBenefitSystem
from openfisca_survey_manager.scenarios import AbstractSurveyScenario
class SenegalSurveyScenario(AbstractSurveyScenario):
id_variable_by_entity_key = dict(
famille = 'id_famille',
)
role_variable_by_entity_key = dict(
famille = 'role_famille',
)
def __init__(self, input_data_frame = None, tax_benefit_system = None,
reference_tax_benefit_system = None, year = None):
super(SenegalSurveyScenario, self).__init__()
assert input_data_frame is not None
assert year is not None
self.year = year
if tax_benefit_system is None:
tax_benefit_system = SenegalTaxBenefitSystem()
self.set_tax_benefit_systems(
tax_benefit_system = tax_benefit_system,
reference_tax_benefit_system = reference_tax_benefit_system
)
self.used_as_input_variables = list(
set(tax_benefit_system.column_by_name.keys()).intersection(
set(input_data_frame.columns)
))
self.init_from_data_frame(input_data_frame = input_data_frame)
self.new_simulation()
if reference_tax_benefit_system is not None:
self.new_simulation(reference = True)
|
<commit_before><commit_msg>Add SurveyScenario adapted to SenegalTaxBenFitSystem<commit_after>
|
# -*- coding: utf-8 -*-
from openfisca_senegal import CountryTaxBenefitSystem as SenegalTaxBenefitSystem
from openfisca_survey_manager.scenarios import AbstractSurveyScenario
class SenegalSurveyScenario(AbstractSurveyScenario):
id_variable_by_entity_key = dict(
famille = 'id_famille',
)
role_variable_by_entity_key = dict(
famille = 'role_famille',
)
def __init__(self, input_data_frame = None, tax_benefit_system = None,
reference_tax_benefit_system = None, year = None):
super(SenegalSurveyScenario, self).__init__()
assert input_data_frame is not None
assert year is not None
self.year = year
if tax_benefit_system is None:
tax_benefit_system = SenegalTaxBenefitSystem()
self.set_tax_benefit_systems(
tax_benefit_system = tax_benefit_system,
reference_tax_benefit_system = reference_tax_benefit_system
)
self.used_as_input_variables = list(
set(tax_benefit_system.column_by_name.keys()).intersection(
set(input_data_frame.columns)
))
self.init_from_data_frame(input_data_frame = input_data_frame)
self.new_simulation()
if reference_tax_benefit_system is not None:
self.new_simulation(reference = True)
|
Add SurveyScenario adapted to SenegalTaxBenFitSystem# -*- coding: utf-8 -*-
from openfisca_senegal import CountryTaxBenefitSystem as SenegalTaxBenefitSystem
from openfisca_survey_manager.scenarios import AbstractSurveyScenario
class SenegalSurveyScenario(AbstractSurveyScenario):
id_variable_by_entity_key = dict(
famille = 'id_famille',
)
role_variable_by_entity_key = dict(
famille = 'role_famille',
)
def __init__(self, input_data_frame = None, tax_benefit_system = None,
reference_tax_benefit_system = None, year = None):
super(SenegalSurveyScenario, self).__init__()
assert input_data_frame is not None
assert year is not None
self.year = year
if tax_benefit_system is None:
tax_benefit_system = SenegalTaxBenefitSystem()
self.set_tax_benefit_systems(
tax_benefit_system = tax_benefit_system,
reference_tax_benefit_system = reference_tax_benefit_system
)
self.used_as_input_variables = list(
set(tax_benefit_system.column_by_name.keys()).intersection(
set(input_data_frame.columns)
))
self.init_from_data_frame(input_data_frame = input_data_frame)
self.new_simulation()
if reference_tax_benefit_system is not None:
self.new_simulation(reference = True)
|
<commit_before><commit_msg>Add SurveyScenario adapted to SenegalTaxBenFitSystem<commit_after># -*- coding: utf-8 -*-
from openfisca_senegal import CountryTaxBenefitSystem as SenegalTaxBenefitSystem
from openfisca_survey_manager.scenarios import AbstractSurveyScenario
class SenegalSurveyScenario(AbstractSurveyScenario):
id_variable_by_entity_key = dict(
famille = 'id_famille',
)
role_variable_by_entity_key = dict(
famille = 'role_famille',
)
def __init__(self, input_data_frame = None, tax_benefit_system = None,
reference_tax_benefit_system = None, year = None):
super(SenegalSurveyScenario, self).__init__()
assert input_data_frame is not None
assert year is not None
self.year = year
if tax_benefit_system is None:
tax_benefit_system = SenegalTaxBenefitSystem()
self.set_tax_benefit_systems(
tax_benefit_system = tax_benefit_system,
reference_tax_benefit_system = reference_tax_benefit_system
)
self.used_as_input_variables = list(
set(tax_benefit_system.column_by_name.keys()).intersection(
set(input_data_frame.columns)
))
self.init_from_data_frame(input_data_frame = input_data_frame)
self.new_simulation()
if reference_tax_benefit_system is not None:
self.new_simulation(reference = True)
|
|
4088b0b3f4c5bf9887bdbe7fd43c5944dc1bceb7
|
tests/python_tests/test_array_index.py
|
tests/python_tests/test_array_index.py
|
import pytest
import xchainer
def test_newaxis():
assert xchainer.newaxis is None
def test_broadcastable():
assert xchainer.broadcastable is xchainer.broadcastable
|
Add simple tests of newaxis and broadcastable python binding
|
Add simple tests of newaxis and broadcastable python binding
|
Python
|
mit
|
okuta/chainer,niboshi/chainer,hvy/chainer,jnishi/chainer,jnishi/chainer,niboshi/chainer,tkerola/chainer,hvy/chainer,chainer/chainer,wkentaro/chainer,keisuke-umezawa/chainer,chainer/chainer,ktnyt/chainer,okuta/chainer,hvy/chainer,wkentaro/chainer,keisuke-umezawa/chainer,niboshi/chainer,okuta/chainer,jnishi/chainer,jnishi/chainer,ktnyt/chainer,hvy/chainer,okuta/chainer,pfnet/chainer,ktnyt/chainer,keisuke-umezawa/chainer,keisuke-umezawa/chainer,wkentaro/chainer,niboshi/chainer,chainer/chainer,ktnyt/chainer,chainer/chainer,wkentaro/chainer
|
Add simple tests of newaxis and broadcastable python binding
|
import pytest
import xchainer
def test_newaxis():
assert xchainer.newaxis is None
def test_broadcastable():
assert xchainer.broadcastable is xchainer.broadcastable
|
<commit_before><commit_msg>Add simple tests of newaxis and broadcastable python binding<commit_after>
|
import pytest
import xchainer
def test_newaxis():
assert xchainer.newaxis is None
def test_broadcastable():
assert xchainer.broadcastable is xchainer.broadcastable
|
Add simple tests of newaxis and broadcastable python bindingimport pytest
import xchainer
def test_newaxis():
assert xchainer.newaxis is None
def test_broadcastable():
assert xchainer.broadcastable is xchainer.broadcastable
|
<commit_before><commit_msg>Add simple tests of newaxis and broadcastable python binding<commit_after>import pytest
import xchainer
def test_newaxis():
assert xchainer.newaxis is None
def test_broadcastable():
assert xchainer.broadcastable is xchainer.broadcastable
|
|
7378250087986dfeb1194b063d7b17e902d6bdcb
|
backend/breach/migrations/0001_initial.py
|
backend/breach/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-27 11:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AttackVectorElement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('symbol', models.CharField(max_length=1)),
],
),
migrations.CreateModel(
name='SampleSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.IntegerField(default=1, help_text='Number of samples contained in the sampleset')),
('knownsecret', models.CharField(help_text='Known secret before the sample set was collected', max_length=255)),
('started', models.DateTimeField(default=django.utils.timezone.now, help_text='Date and time at which sample set collection was started')),
('completed', models.DateTimeField(help_text='When we stopped collecting samples for this sampleset, successfully or not')),
('success', models.BooleanField(help_text='Whether the samples in this sampleset were all collected successfully')),
],
),
migrations.CreateModel(
name='Target',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('endpoint', models.CharField(max_length=255)),
('prefix', models.CharField(max_length=255)),
('alphabet', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Victim',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.Target')),
],
),
migrations.AddField(
model_name='sampleset',
name='victim',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.Victim'),
),
migrations.AddField(
model_name='attackvectorelement',
name='sampleset',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.SampleSet'),
),
]
|
Add migration to first version of BREACH models
|
Add migration to first version of BREACH models
|
Python
|
mit
|
esarafianou/rupture,esarafianou/rupture,dionyziz/rupture,esarafianou/rupture,dionyziz/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dimriou/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dimriou/rupture,dimriou/rupture,dionyziz/rupture,dimriou/rupture,dimriou/rupture,dionyziz/rupture,dionyziz/rupture,esarafianou/rupture
|
Add migration to first version of BREACH models
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-27 11:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AttackVectorElement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('symbol', models.CharField(max_length=1)),
],
),
migrations.CreateModel(
name='SampleSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.IntegerField(default=1, help_text='Number of samples contained in the sampleset')),
('knownsecret', models.CharField(help_text='Known secret before the sample set was collected', max_length=255)),
('started', models.DateTimeField(default=django.utils.timezone.now, help_text='Date and time at which sample set collection was started')),
('completed', models.DateTimeField(help_text='When we stopped collecting samples for this sampleset, successfully or not')),
('success', models.BooleanField(help_text='Whether the samples in this sampleset were all collected successfully')),
],
),
migrations.CreateModel(
name='Target',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('endpoint', models.CharField(max_length=255)),
('prefix', models.CharField(max_length=255)),
('alphabet', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Victim',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.Target')),
],
),
migrations.AddField(
model_name='sampleset',
name='victim',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.Victim'),
),
migrations.AddField(
model_name='attackvectorelement',
name='sampleset',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.SampleSet'),
),
]
|
<commit_before><commit_msg>Add migration to first version of BREACH models<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-27 11:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AttackVectorElement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('symbol', models.CharField(max_length=1)),
],
),
migrations.CreateModel(
name='SampleSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.IntegerField(default=1, help_text='Number of samples contained in the sampleset')),
('knownsecret', models.CharField(help_text='Known secret before the sample set was collected', max_length=255)),
('started', models.DateTimeField(default=django.utils.timezone.now, help_text='Date and time at which sample set collection was started')),
('completed', models.DateTimeField(help_text='When we stopped collecting samples for this sampleset, successfully or not')),
('success', models.BooleanField(help_text='Whether the samples in this sampleset were all collected successfully')),
],
),
migrations.CreateModel(
name='Target',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('endpoint', models.CharField(max_length=255)),
('prefix', models.CharField(max_length=255)),
('alphabet', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Victim',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.Target')),
],
),
migrations.AddField(
model_name='sampleset',
name='victim',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.Victim'),
),
migrations.AddField(
model_name='attackvectorelement',
name='sampleset',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.SampleSet'),
),
]
|
Add migration to first version of BREACH models# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-27 11:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AttackVectorElement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('symbol', models.CharField(max_length=1)),
],
),
migrations.CreateModel(
name='SampleSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.IntegerField(default=1, help_text='Number of samples contained in the sampleset')),
('knownsecret', models.CharField(help_text='Known secret before the sample set was collected', max_length=255)),
('started', models.DateTimeField(default=django.utils.timezone.now, help_text='Date and time at which sample set collection was started')),
('completed', models.DateTimeField(help_text='When we stopped collecting samples for this sampleset, successfully or not')),
('success', models.BooleanField(help_text='Whether the samples in this sampleset were all collected successfully')),
],
),
migrations.CreateModel(
name='Target',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('endpoint', models.CharField(max_length=255)),
('prefix', models.CharField(max_length=255)),
('alphabet', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Victim',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.Target')),
],
),
migrations.AddField(
model_name='sampleset',
name='victim',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.Victim'),
),
migrations.AddField(
model_name='attackvectorelement',
name='sampleset',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.SampleSet'),
),
]
|
<commit_before><commit_msg>Add migration to first version of BREACH models<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-27 11:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AttackVectorElement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('symbol', models.CharField(max_length=1)),
],
),
migrations.CreateModel(
name='SampleSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.IntegerField(default=1, help_text='Number of samples contained in the sampleset')),
('knownsecret', models.CharField(help_text='Known secret before the sample set was collected', max_length=255)),
('started', models.DateTimeField(default=django.utils.timezone.now, help_text='Date and time at which sample set collection was started')),
('completed', models.DateTimeField(help_text='When we stopped collecting samples for this sampleset, successfully or not')),
('success', models.BooleanField(help_text='Whether the samples in this sampleset were all collected successfully')),
],
),
migrations.CreateModel(
name='Target',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('endpoint', models.CharField(max_length=255)),
('prefix', models.CharField(max_length=255)),
('alphabet', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Victim',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.Target')),
],
),
migrations.AddField(
model_name='sampleset',
name='victim',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.Victim'),
),
migrations.AddField(
model_name='attackvectorelement',
name='sampleset',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='breach.SampleSet'),
),
]
|
|
af35c0cbf26b848dbc09813102a88c82f3d61280
|
src/main/python/combine_tlda_result.py
|
src/main/python/combine_tlda_result.py
|
# -*- coding: utf-8 -*-
__author__ = 'stefanie & DJ'
'''
'''
import sys, os, re, shutil
def analyzeTopics(inputTopic):
str = inputTopic.read()
strwithoutEnter = str.replace("\n", "")
#In this case, '' should be in the beginning since it's before Topic 0
lineTopics = strwithoutEnter.split("Topic")
print lineTopics
return lineTopics
def returnFirst3Topics(fields):
#topic number should be greater than 3
topics = [1, 2, 3]
for i in range(4, len(fields)):
for j in range(0, 3):
if fields[i] > fields[topics[j]]:
topics[j] = i
break
return topics
def associate_topic(inputData, lineTopics, outputFile, path):
l = inputData.readline()
while l:
line=l.strip()
if line:
fields = line.split("\t")
pid=fields[0].split(".")[0]
# print "processing" + pid
related = returnFirst3Topics(fields)
# print related + [fields[related[0]], fields[related[1]], fields[related[2]]] + fields
outputFile.write("Topics:" + returnFirst3Words(lineTopics[related[0]]) + ";" + returnFirst3Words(lineTopics[related[1]]) + ";" + returnFirst3Words(lineTopics[related[2]]) + "\nIntro:" + returnIntro(pid, path) + "\n")
l = inputData.readline()
def returnIntro(pid, path):
filePath = path + "\originaldocs\sample1_complete\\txt\\" + pid + ".intro.txt"
print filePath
introFile = file(filePath, "r")
intro = introFile.readline()
introFile.close()
return intro
def returnFirst3Words(eachTopics):
line = eachTopics.split("\t")
topics = line[1] + "," + line[3] + "," + line[5]
return topics
'''
def usage():
print "One parameter is required as the LDA result *.theta file path"
'''
if __name__ == "__main__":
if len(sys.argv) < 1: # Expect more then two argument: the input data file and output folder
path="/output/tlda/TopicsDistributionOnUsers.txt"
else :
path=sys.argv[1]
inputFile = path + "\output\\tlda\TopicsDistributionOnUsers.txt"
topicFile = path + "\output\\tlda\WordsInTopics.txt"
outputFile = path + "\output\weibo.txt"
try:
print inputFile
inputData = file(inputFile, "r")
inputTopic = file(topicFile, "r")
outputFile = file(outputFile, "w")
except IOError:
sys.stderr.write("ERROR: Cannot read inputfile %s.\n" % sys.argv)
sys.exit(1)
lineTopics = analyzeTopics(inputTopic)
associate_topic(inputData, lineTopics, outputFile, path)
inputData.close()
inputTopic.close()
outputFile.close()
|
Revert "Revert "the util to combine message""
|
Revert "Revert "the util to combine message""
This reverts commit 91537f48ebd3af6e5d74f14be8bcf12b225f7c81.
|
Python
|
apache-2.0
|
anphoenix/demo_nlp_im,anphoenix/demo_nlp_im
|
Revert "Revert "the util to combine message""
This reverts commit 91537f48ebd3af6e5d74f14be8bcf12b225f7c81.
|
# -*- coding: utf-8 -*-
__author__ = 'stefanie & DJ'
'''
'''
import sys, os, re, shutil
def analyzeTopics(inputTopic):
str = inputTopic.read()
strwithoutEnter = str.replace("\n", "")
#In this case, '' should be in the beginning since it's before Topic 0
lineTopics = strwithoutEnter.split("Topic")
print lineTopics
return lineTopics
def returnFirst3Topics(fields):
#topic number should be greater than 3
topics = [1, 2, 3]
for i in range(4, len(fields)):
for j in range(0, 3):
if fields[i] > fields[topics[j]]:
topics[j] = i
break
return topics
def associate_topic(inputData, lineTopics, outputFile, path):
l = inputData.readline()
while l:
line=l.strip()
if line:
fields = line.split("\t")
pid=fields[0].split(".")[0]
# print "processing" + pid
related = returnFirst3Topics(fields)
# print related + [fields[related[0]], fields[related[1]], fields[related[2]]] + fields
outputFile.write("Topics:" + returnFirst3Words(lineTopics[related[0]]) + ";" + returnFirst3Words(lineTopics[related[1]]) + ";" + returnFirst3Words(lineTopics[related[2]]) + "\nIntro:" + returnIntro(pid, path) + "\n")
l = inputData.readline()
def returnIntro(pid, path):
filePath = path + "\originaldocs\sample1_complete\\txt\\" + pid + ".intro.txt"
print filePath
introFile = file(filePath, "r")
intro = introFile.readline()
introFile.close()
return intro
def returnFirst3Words(eachTopics):
line = eachTopics.split("\t")
topics = line[1] + "," + line[3] + "," + line[5]
return topics
'''
def usage():
print "One parameter is required as the LDA result *.theta file path"
'''
if __name__ == "__main__":
if len(sys.argv) < 1: # Expect more then two argument: the input data file and output folder
path="/output/tlda/TopicsDistributionOnUsers.txt"
else :
path=sys.argv[1]
inputFile = path + "\output\\tlda\TopicsDistributionOnUsers.txt"
topicFile = path + "\output\\tlda\WordsInTopics.txt"
outputFile = path + "\output\weibo.txt"
try:
print inputFile
inputData = file(inputFile, "r")
inputTopic = file(topicFile, "r")
outputFile = file(outputFile, "w")
except IOError:
sys.stderr.write("ERROR: Cannot read inputfile %s.\n" % sys.argv)
sys.exit(1)
lineTopics = analyzeTopics(inputTopic)
associate_topic(inputData, lineTopics, outputFile, path)
inputData.close()
inputTopic.close()
outputFile.close()
|
<commit_before><commit_msg>Revert "Revert "the util to combine message""
This reverts commit 91537f48ebd3af6e5d74f14be8bcf12b225f7c81.<commit_after>
|
# -*- coding: utf-8 -*-
__author__ = 'stefanie & DJ'
'''
'''
import sys, os, re, shutil
def analyzeTopics(inputTopic):
str = inputTopic.read()
strwithoutEnter = str.replace("\n", "")
#In this case, '' should be in the beginning since it's before Topic 0
lineTopics = strwithoutEnter.split("Topic")
print lineTopics
return lineTopics
def returnFirst3Topics(fields):
#topic number should be greater than 3
topics = [1, 2, 3]
for i in range(4, len(fields)):
for j in range(0, 3):
if fields[i] > fields[topics[j]]:
topics[j] = i
break
return topics
def associate_topic(inputData, lineTopics, outputFile, path):
l = inputData.readline()
while l:
line=l.strip()
if line:
fields = line.split("\t")
pid=fields[0].split(".")[0]
# print "processing" + pid
related = returnFirst3Topics(fields)
# print related + [fields[related[0]], fields[related[1]], fields[related[2]]] + fields
outputFile.write("Topics:" + returnFirst3Words(lineTopics[related[0]]) + ";" + returnFirst3Words(lineTopics[related[1]]) + ";" + returnFirst3Words(lineTopics[related[2]]) + "\nIntro:" + returnIntro(pid, path) + "\n")
l = inputData.readline()
def returnIntro(pid, path):
filePath = path + "\originaldocs\sample1_complete\\txt\\" + pid + ".intro.txt"
print filePath
introFile = file(filePath, "r")
intro = introFile.readline()
introFile.close()
return intro
def returnFirst3Words(eachTopics):
line = eachTopics.split("\t")
topics = line[1] + "," + line[3] + "," + line[5]
return topics
'''
def usage():
print "One parameter is required as the LDA result *.theta file path"
'''
if __name__ == "__main__":
if len(sys.argv) < 1: # Expect more then two argument: the input data file and output folder
path="/output/tlda/TopicsDistributionOnUsers.txt"
else :
path=sys.argv[1]
inputFile = path + "\output\\tlda\TopicsDistributionOnUsers.txt"
topicFile = path + "\output\\tlda\WordsInTopics.txt"
outputFile = path + "\output\weibo.txt"
try:
print inputFile
inputData = file(inputFile, "r")
inputTopic = file(topicFile, "r")
outputFile = file(outputFile, "w")
except IOError:
sys.stderr.write("ERROR: Cannot read inputfile %s.\n" % sys.argv)
sys.exit(1)
lineTopics = analyzeTopics(inputTopic)
associate_topic(inputData, lineTopics, outputFile, path)
inputData.close()
inputTopic.close()
outputFile.close()
|
Revert "Revert "the util to combine message""
This reverts commit 91537f48ebd3af6e5d74f14be8bcf12b225f7c81.# -*- coding: utf-8 -*-
__author__ = 'stefanie & DJ'
'''
'''
import sys, os, re, shutil
def analyzeTopics(inputTopic):
str = inputTopic.read()
strwithoutEnter = str.replace("\n", "")
#In this case, '' should be in the beginning since it's before Topic 0
lineTopics = strwithoutEnter.split("Topic")
print lineTopics
return lineTopics
def returnFirst3Topics(fields):
#topic number should be greater than 3
topics = [1, 2, 3]
for i in range(4, len(fields)):
for j in range(0, 3):
if fields[i] > fields[topics[j]]:
topics[j] = i
break
return topics
def associate_topic(inputData, lineTopics, outputFile, path):
l = inputData.readline()
while l:
line=l.strip()
if line:
fields = line.split("\t")
pid=fields[0].split(".")[0]
# print "processing" + pid
related = returnFirst3Topics(fields)
# print related + [fields[related[0]], fields[related[1]], fields[related[2]]] + fields
outputFile.write("Topics:" + returnFirst3Words(lineTopics[related[0]]) + ";" + returnFirst3Words(lineTopics[related[1]]) + ";" + returnFirst3Words(lineTopics[related[2]]) + "\nIntro:" + returnIntro(pid, path) + "\n")
l = inputData.readline()
def returnIntro(pid, path):
filePath = path + "\originaldocs\sample1_complete\\txt\\" + pid + ".intro.txt"
print filePath
introFile = file(filePath, "r")
intro = introFile.readline()
introFile.close()
return intro
def returnFirst3Words(eachTopics):
line = eachTopics.split("\t")
topics = line[1] + "," + line[3] + "," + line[5]
return topics
'''
def usage():
print "One parameter is required as the LDA result *.theta file path"
'''
if __name__ == "__main__":
if len(sys.argv) < 1: # Expect more then two argument: the input data file and output folder
path="/output/tlda/TopicsDistributionOnUsers.txt"
else :
path=sys.argv[1]
inputFile = path + "\output\\tlda\TopicsDistributionOnUsers.txt"
topicFile = path + "\output\\tlda\WordsInTopics.txt"
outputFile = path + "\output\weibo.txt"
try:
print inputFile
inputData = file(inputFile, "r")
inputTopic = file(topicFile, "r")
outputFile = file(outputFile, "w")
except IOError:
sys.stderr.write("ERROR: Cannot read inputfile %s.\n" % sys.argv)
sys.exit(1)
lineTopics = analyzeTopics(inputTopic)
associate_topic(inputData, lineTopics, outputFile, path)
inputData.close()
inputTopic.close()
outputFile.close()
|
<commit_before><commit_msg>Revert "Revert "the util to combine message""
This reverts commit 91537f48ebd3af6e5d74f14be8bcf12b225f7c81.<commit_after># -*- coding: utf-8 -*-
__author__ = 'stefanie & DJ'
'''
'''
import sys, os, re, shutil
def analyzeTopics(inputTopic):
str = inputTopic.read()
strwithoutEnter = str.replace("\n", "")
#In this case, '' should be in the beginning since it's before Topic 0
lineTopics = strwithoutEnter.split("Topic")
print lineTopics
return lineTopics
def returnFirst3Topics(fields):
#topic number should be greater than 3
topics = [1, 2, 3]
for i in range(4, len(fields)):
for j in range(0, 3):
if fields[i] > fields[topics[j]]:
topics[j] = i
break
return topics
def associate_topic(inputData, lineTopics, outputFile, path):
l = inputData.readline()
while l:
line=l.strip()
if line:
fields = line.split("\t")
pid=fields[0].split(".")[0]
# print "processing" + pid
related = returnFirst3Topics(fields)
# print related + [fields[related[0]], fields[related[1]], fields[related[2]]] + fields
outputFile.write("Topics:" + returnFirst3Words(lineTopics[related[0]]) + ";" + returnFirst3Words(lineTopics[related[1]]) + ";" + returnFirst3Words(lineTopics[related[2]]) + "\nIntro:" + returnIntro(pid, path) + "\n")
l = inputData.readline()
def returnIntro(pid, path):
filePath = path + "\originaldocs\sample1_complete\\txt\\" + pid + ".intro.txt"
print filePath
introFile = file(filePath, "r")
intro = introFile.readline()
introFile.close()
return intro
def returnFirst3Words(eachTopics):
line = eachTopics.split("\t")
topics = line[1] + "," + line[3] + "," + line[5]
return topics
'''
def usage():
print "One parameter is required as the LDA result *.theta file path"
'''
if __name__ == "__main__":
if len(sys.argv) < 1: # Expect more then two argument: the input data file and output folder
path="/output/tlda/TopicsDistributionOnUsers.txt"
else :
path=sys.argv[1]
inputFile = path + "\output\\tlda\TopicsDistributionOnUsers.txt"
topicFile = path + "\output\\tlda\WordsInTopics.txt"
outputFile = path + "\output\weibo.txt"
try:
print inputFile
inputData = file(inputFile, "r")
inputTopic = file(topicFile, "r")
outputFile = file(outputFile, "w")
except IOError:
sys.stderr.write("ERROR: Cannot read inputfile %s.\n" % sys.argv)
sys.exit(1)
lineTopics = analyzeTopics(inputTopic)
associate_topic(inputData, lineTopics, outputFile, path)
inputData.close()
inputTopic.close()
outputFile.close()
|
|
5c946b79b5060c7b1dbab3d86ba13cd8e2243add
|
oneflow/profiles/api.py
|
oneflow/profiles/api.py
|
# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
|
Create the User & UserProfile API resources (probably needs restrictions to avoid anyone getting all entries in the DB…).
|
Create the User & UserProfile API resources (probably needs restrictions to avoid anyone getting all entries in the DB…).
|
Python
|
agpl-3.0
|
1flow/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,1flow/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow
|
Create the User & UserProfile API resources (probably needs restrictions to avoid anyone getting all entries in the DB…).
|
# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
|
<commit_before><commit_msg>Create the User & UserProfile API resources (probably needs restrictions to avoid anyone getting all entries in the DB…).<commit_after>
|
# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
|
Create the User & UserProfile API resources (probably needs restrictions to avoid anyone getting all entries in the DB…).# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
|
<commit_before><commit_msg>Create the User & UserProfile API resources (probably needs restrictions to avoid anyone getting all entries in the DB…).<commit_after># -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
|
|
bb9d02276db7b753837119797c6a61fc4128d20e
|
tests/test_metrics.py
|
tests/test_metrics.py
|
from husc import metrics
import numpy as np
import os
import pandas as pd
from pymongo import MongoClient
import subprocess as sp
abspath = os.path.dirname(__file__)
def string2tuple(string_tuple):
# TODO add docstring
string_values = string_tuple.split(', ')
coords = (int(string_values[0][1:]), string_values[1][1:-2])
return coords
client = MongoClient('localhost', 27017)
db = client['myofusion_test']
collection = db.wells_test
if db.wells_test.find({}).count() == 0:
sp.Popen(['mongoimport', '-host', 'localhost:27017', '-d',
'myofusion_test', '-c', 'wells_test',
os.path.join(abspath, 'testdata/wells_test.json')])
time.sleep(2)
test_data = pd.read_csv(os.path.join(abspath, 'testdata/data_test.csv'),
index_col=0, converters={0: string2tuple})
def test_mongo_group_by():
expected = set({'Mbnl1': [(2490700, 'L13'), (2490702, 'L13')],
'Nudt3': [(2490702, 'L04'), (2490701, 'L04')],
'Lmbr1l': [(2490702, 'G03'), (2490701, 'G03')],
'Pknox1': [(2490702, 'H05'), (2490700, 'H05')]})
query = set(metrics.mongo_group_by(collection, 'gene_name'))
assert expected == query
def test_gene_distance_score():
expected_intra = []
for i in range(0, 4):
gene_pair = test_data.ix[2*i:2*i+2].values
expected_intra.append(np.linalg.norm(gene_pair[0] - gene_pair[1]))
intra, inter = metrics.gene_distance_score(test_data, collection)
np.testing.assert_array_almost_equal(expected_intra, intra, decimal=4)
def test_gene_distance_score2():
intra, inter = metrics.gene_distance_score(test_data, collection)
assert np.mean(intra) < np.mean(inter)
|
Add tests for metrics functions
|
Add tests for metrics functions
|
Python
|
bsd-3-clause
|
jni/microscopium,microscopium/microscopium,Don86/microscopium,Don86/microscopium,jni/microscopium,starcalibre/microscopium,microscopium/microscopium
|
Add tests for metrics functions
|
from husc import metrics
import numpy as np
import os
import pandas as pd
from pymongo import MongoClient
import subprocess as sp
abspath = os.path.dirname(__file__)
def string2tuple(string_tuple):
# TODO add docstring
string_values = string_tuple.split(', ')
coords = (int(string_values[0][1:]), string_values[1][1:-2])
return coords
client = MongoClient('localhost', 27017)
db = client['myofusion_test']
collection = db.wells_test
if db.wells_test.find({}).count() == 0:
sp.Popen(['mongoimport', '-host', 'localhost:27017', '-d',
'myofusion_test', '-c', 'wells_test',
os.path.join(abspath, 'testdata/wells_test.json')])
time.sleep(2)
test_data = pd.read_csv(os.path.join(abspath, 'testdata/data_test.csv'),
index_col=0, converters={0: string2tuple})
def test_mongo_group_by():
expected = set({'Mbnl1': [(2490700, 'L13'), (2490702, 'L13')],
'Nudt3': [(2490702, 'L04'), (2490701, 'L04')],
'Lmbr1l': [(2490702, 'G03'), (2490701, 'G03')],
'Pknox1': [(2490702, 'H05'), (2490700, 'H05')]})
query = set(metrics.mongo_group_by(collection, 'gene_name'))
assert expected == query
def test_gene_distance_score():
expected_intra = []
for i in range(0, 4):
gene_pair = test_data.ix[2*i:2*i+2].values
expected_intra.append(np.linalg.norm(gene_pair[0] - gene_pair[1]))
intra, inter = metrics.gene_distance_score(test_data, collection)
np.testing.assert_array_almost_equal(expected_intra, intra, decimal=4)
def test_gene_distance_score2():
intra, inter = metrics.gene_distance_score(test_data, collection)
assert np.mean(intra) < np.mean(inter)
|
<commit_before><commit_msg>Add tests for metrics functions<commit_after>
|
from husc import metrics
import numpy as np
import os
import pandas as pd
from pymongo import MongoClient
import subprocess as sp
abspath = os.path.dirname(__file__)
def string2tuple(string_tuple):
# TODO add docstring
string_values = string_tuple.split(', ')
coords = (int(string_values[0][1:]), string_values[1][1:-2])
return coords
client = MongoClient('localhost', 27017)
db = client['myofusion_test']
collection = db.wells_test
if db.wells_test.find({}).count() == 0:
sp.Popen(['mongoimport', '-host', 'localhost:27017', '-d',
'myofusion_test', '-c', 'wells_test',
os.path.join(abspath, 'testdata/wells_test.json')])
time.sleep(2)
test_data = pd.read_csv(os.path.join(abspath, 'testdata/data_test.csv'),
index_col=0, converters={0: string2tuple})
def test_mongo_group_by():
expected = set({'Mbnl1': [(2490700, 'L13'), (2490702, 'L13')],
'Nudt3': [(2490702, 'L04'), (2490701, 'L04')],
'Lmbr1l': [(2490702, 'G03'), (2490701, 'G03')],
'Pknox1': [(2490702, 'H05'), (2490700, 'H05')]})
query = set(metrics.mongo_group_by(collection, 'gene_name'))
assert expected == query
def test_gene_distance_score():
expected_intra = []
for i in range(0, 4):
gene_pair = test_data.ix[2*i:2*i+2].values
expected_intra.append(np.linalg.norm(gene_pair[0] - gene_pair[1]))
intra, inter = metrics.gene_distance_score(test_data, collection)
np.testing.assert_array_almost_equal(expected_intra, intra, decimal=4)
def test_gene_distance_score2():
intra, inter = metrics.gene_distance_score(test_data, collection)
assert np.mean(intra) < np.mean(inter)
|
Add tests for metrics functionsfrom husc import metrics
import numpy as np
import os
import pandas as pd
from pymongo import MongoClient
import subprocess as sp
abspath = os.path.dirname(__file__)
def string2tuple(string_tuple):
# TODO add docstring
string_values = string_tuple.split(', ')
coords = (int(string_values[0][1:]), string_values[1][1:-2])
return coords
client = MongoClient('localhost', 27017)
db = client['myofusion_test']
collection = db.wells_test
if db.wells_test.find({}).count() == 0:
sp.Popen(['mongoimport', '-host', 'localhost:27017', '-d',
'myofusion_test', '-c', 'wells_test',
os.path.join(abspath, 'testdata/wells_test.json')])
time.sleep(2)
test_data = pd.read_csv(os.path.join(abspath, 'testdata/data_test.csv'),
index_col=0, converters={0: string2tuple})
def test_mongo_group_by():
expected = set({'Mbnl1': [(2490700, 'L13'), (2490702, 'L13')],
'Nudt3': [(2490702, 'L04'), (2490701, 'L04')],
'Lmbr1l': [(2490702, 'G03'), (2490701, 'G03')],
'Pknox1': [(2490702, 'H05'), (2490700, 'H05')]})
query = set(metrics.mongo_group_by(collection, 'gene_name'))
assert expected == query
def test_gene_distance_score():
expected_intra = []
for i in range(0, 4):
gene_pair = test_data.ix[2*i:2*i+2].values
expected_intra.append(np.linalg.norm(gene_pair[0] - gene_pair[1]))
intra, inter = metrics.gene_distance_score(test_data, collection)
np.testing.assert_array_almost_equal(expected_intra, intra, decimal=4)
def test_gene_distance_score2():
intra, inter = metrics.gene_distance_score(test_data, collection)
assert np.mean(intra) < np.mean(inter)
|
<commit_before><commit_msg>Add tests for metrics functions<commit_after>from husc import metrics
import numpy as np
import os
import pandas as pd
from pymongo import MongoClient
import subprocess as sp
abspath = os.path.dirname(__file__)
def string2tuple(string_tuple):
# TODO add docstring
string_values = string_tuple.split(', ')
coords = (int(string_values[0][1:]), string_values[1][1:-2])
return coords
client = MongoClient('localhost', 27017)
db = client['myofusion_test']
collection = db.wells_test
if db.wells_test.find({}).count() == 0:
sp.Popen(['mongoimport', '-host', 'localhost:27017', '-d',
'myofusion_test', '-c', 'wells_test',
os.path.join(abspath, 'testdata/wells_test.json')])
time.sleep(2)
test_data = pd.read_csv(os.path.join(abspath, 'testdata/data_test.csv'),
index_col=0, converters={0: string2tuple})
def test_mongo_group_by():
expected = set({'Mbnl1': [(2490700, 'L13'), (2490702, 'L13')],
'Nudt3': [(2490702, 'L04'), (2490701, 'L04')],
'Lmbr1l': [(2490702, 'G03'), (2490701, 'G03')],
'Pknox1': [(2490702, 'H05'), (2490700, 'H05')]})
query = set(metrics.mongo_group_by(collection, 'gene_name'))
assert expected == query
def test_gene_distance_score():
expected_intra = []
for i in range(0, 4):
gene_pair = test_data.ix[2*i:2*i+2].values
expected_intra.append(np.linalg.norm(gene_pair[0] - gene_pair[1]))
intra, inter = metrics.gene_distance_score(test_data, collection)
np.testing.assert_array_almost_equal(expected_intra, intra, decimal=4)
def test_gene_distance_score2():
intra, inter = metrics.gene_distance_score(test_data, collection)
assert np.mean(intra) < np.mean(inter)
|
|
074e9d6328cb8e699c9afe717f70e15e884dd3d6
|
test_goodRqst_sockserver.py
|
test_goodRqst_sockserver.py
|
#!/usr/bin/env python
import socket_server, pytest, socket
address= ('127.0.0.1', 50000)
tester_client= socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_IP)
def test_good_request():
request= "GET www.wombatlyfe.com HTTP/1.1\r\n"
tester_client.connect(address)
tester_client.sendall(request)
message= tester_client.recv(1032)
tester_client.shutdown(socket.SHUT_WR)
tester_client.close()
assert 'Bad Request' not in message
|
Add test to ensure that a proper output is recieved by client when proper request is made
|
Add test to ensure that a proper output is recieved by client when proper request is made
|
Python
|
mit
|
charlieRode/network_tools
|
Add test to ensure that a proper output is recieved by client when proper request is made
|
#!/usr/bin/env python
import socket_server, pytest, socket
address= ('127.0.0.1', 50000)
tester_client= socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_IP)
def test_good_request():
request= "GET www.wombatlyfe.com HTTP/1.1\r\n"
tester_client.connect(address)
tester_client.sendall(request)
message= tester_client.recv(1032)
tester_client.shutdown(socket.SHUT_WR)
tester_client.close()
assert 'Bad Request' not in message
|
<commit_before><commit_msg>Add test to ensure that a proper output is recieved by client when proper request is made<commit_after>
|
#!/usr/bin/env python
import socket_server, pytest, socket
address= ('127.0.0.1', 50000)
tester_client= socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_IP)
def test_good_request():
request= "GET www.wombatlyfe.com HTTP/1.1\r\n"
tester_client.connect(address)
tester_client.sendall(request)
message= tester_client.recv(1032)
tester_client.shutdown(socket.SHUT_WR)
tester_client.close()
assert 'Bad Request' not in message
|
Add test to ensure that a proper output is recieved by client when proper request is made#!/usr/bin/env python
import socket_server, pytest, socket
address= ('127.0.0.1', 50000)
tester_client= socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_IP)
def test_good_request():
request= "GET www.wombatlyfe.com HTTP/1.1\r\n"
tester_client.connect(address)
tester_client.sendall(request)
message= tester_client.recv(1032)
tester_client.shutdown(socket.SHUT_WR)
tester_client.close()
assert 'Bad Request' not in message
|
<commit_before><commit_msg>Add test to ensure that a proper output is recieved by client when proper request is made<commit_after>#!/usr/bin/env python
import socket_server, pytest, socket
address= ('127.0.0.1', 50000)
tester_client= socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_IP)
def test_good_request():
request= "GET www.wombatlyfe.com HTTP/1.1\r\n"
tester_client.connect(address)
tester_client.sendall(request)
message= tester_client.recv(1032)
tester_client.shutdown(socket.SHUT_WR)
tester_client.close()
assert 'Bad Request' not in message
|
|
01fc606eabef14dca0b3e7f6b2a5e5a52c360fba
|
condor/python/resync_dashboards.py
|
condor/python/resync_dashboards.py
|
#!/usr/bin/env python
import argparse
import sys
import logging
import elasticsearch
import elasticsearch.helpers
ES_NODES = 'uct2-es-door.mwt2.org'
VERSION = '0.1'
SOURCE_INDEX = '.kibana'
TARGET_INDEX = 'osg-connect-kibana'
def get_es_client():
""" Instantiate DB client and pass connection back """
return elasticsearch.Elasticsearch(hosts=ES_NODES,
retry_on_timeout=True,
max_retries=10,
timeout=300)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Reindex events from ' +
'.kibana ' +
'to osg-connect-kibana')
args = parser.parse_args(sys.argv[1:])
client = get_es_client()
results = elasticsearch.helpers.reindex(client,
SOURCE_INDEX,
TARGET_INDEX,
scroll='30m')
sys.stdout.write(results)
|
Add script to sync dashboards
|
Add script to sync dashboards
|
Python
|
apache-2.0
|
DHTC-Tools/logstash-confs,DHTC-Tools/logstash-confs,DHTC-Tools/logstash-confs
|
Add script to sync dashboards
|
#!/usr/bin/env python
import argparse
import sys
import logging
import elasticsearch
import elasticsearch.helpers
ES_NODES = 'uct2-es-door.mwt2.org'
VERSION = '0.1'
SOURCE_INDEX = '.kibana'
TARGET_INDEX = 'osg-connect-kibana'
def get_es_client():
""" Instantiate DB client and pass connection back """
return elasticsearch.Elasticsearch(hosts=ES_NODES,
retry_on_timeout=True,
max_retries=10,
timeout=300)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Reindex events from ' +
'.kibana ' +
'to osg-connect-kibana')
args = parser.parse_args(sys.argv[1:])
client = get_es_client()
results = elasticsearch.helpers.reindex(client,
SOURCE_INDEX,
TARGET_INDEX,
scroll='30m')
sys.stdout.write(results)
|
<commit_before><commit_msg>Add script to sync dashboards<commit_after>
|
#!/usr/bin/env python
import argparse
import sys
import logging
import elasticsearch
import elasticsearch.helpers
ES_NODES = 'uct2-es-door.mwt2.org'
VERSION = '0.1'
SOURCE_INDEX = '.kibana'
TARGET_INDEX = 'osg-connect-kibana'
def get_es_client():
""" Instantiate DB client and pass connection back """
return elasticsearch.Elasticsearch(hosts=ES_NODES,
retry_on_timeout=True,
max_retries=10,
timeout=300)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Reindex events from ' +
'.kibana ' +
'to osg-connect-kibana')
args = parser.parse_args(sys.argv[1:])
client = get_es_client()
results = elasticsearch.helpers.reindex(client,
SOURCE_INDEX,
TARGET_INDEX,
scroll='30m')
sys.stdout.write(results)
|
Add script to sync dashboards#!/usr/bin/env python
import argparse
import sys
import logging
import elasticsearch
import elasticsearch.helpers
ES_NODES = 'uct2-es-door.mwt2.org'
VERSION = '0.1'
SOURCE_INDEX = '.kibana'
TARGET_INDEX = 'osg-connect-kibana'
def get_es_client():
""" Instantiate DB client and pass connection back """
return elasticsearch.Elasticsearch(hosts=ES_NODES,
retry_on_timeout=True,
max_retries=10,
timeout=300)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Reindex events from ' +
'.kibana ' +
'to osg-connect-kibana')
args = parser.parse_args(sys.argv[1:])
client = get_es_client()
results = elasticsearch.helpers.reindex(client,
SOURCE_INDEX,
TARGET_INDEX,
scroll='30m')
sys.stdout.write(results)
|
<commit_before><commit_msg>Add script to sync dashboards<commit_after>#!/usr/bin/env python
import argparse
import sys
import logging
import elasticsearch
import elasticsearch.helpers
ES_NODES = 'uct2-es-door.mwt2.org'
VERSION = '0.1'
SOURCE_INDEX = '.kibana'
TARGET_INDEX = 'osg-connect-kibana'
def get_es_client():
""" Instantiate DB client and pass connection back """
return elasticsearch.Elasticsearch(hosts=ES_NODES,
retry_on_timeout=True,
max_retries=10,
timeout=300)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Reindex events from ' +
'.kibana ' +
'to osg-connect-kibana')
args = parser.parse_args(sys.argv[1:])
client = get_es_client()
results = elasticsearch.helpers.reindex(client,
SOURCE_INDEX,
TARGET_INDEX,
scroll='30m')
sys.stdout.write(results)
|
|
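One thing worth noting about the resync script above: in the elasticsearch-py versions that ship this helper, elasticsearch.helpers.reindex returns a (success_count, errors) tuple rather than a string, so passing its result straight to sys.stdout.write can raise a TypeError. A hedged sketch of a safer tail for the script (the exact return shape depends on the installed client version):

# reuse client, SOURCE_INDEX and TARGET_INDEX from the script above
success, errors = elasticsearch.helpers.reindex(client,
                                                SOURCE_INDEX,
                                                TARGET_INDEX,
                                                scroll='30m')
sys.stdout.write('reindexed {} documents, {} errors\n'.format(success, errors))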
8038e55e93b7e307d02c1f88527e06202801579e
|
notebooks/Model_Output/solutions/map.py
|
notebooks/Model_Output/solutions/map.py
|
import numpy as np
# Set up an NCSS query from thredds using siphon
query = ncss.query()
query.accept('netcdf4')
query.variables('Temperature_isobaric', 'Geopotential_height_isobaric')
query.vertical_level(50000)
now = datetime.utcnow()
query.time_range(now, now + timedelta(days=1))
query.lonlat_box(west=-110, east=-45, north=50, south=10)
# Download data using NCSS
data = ncss.get_data(query)
ds = xr.open_dataset(NetCDF4DataStore(data))
temp_var = ds.metpy.parse_cf('Temperature_isobaric')
height_var = ds.metpy.parse_cf('Geopotential_height_isobaric')
longitude = temp_var.metpy.x
latitude = temp_var.metpy.y
time_index = 0
# Plot using CartoPy and Matplotlib
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
contours = np.arange(5000, 6000, 80)
ax.pcolormesh(longitude, latitude, temp_var[time_index].squeeze(),
transform=data_projection, zorder=0)
ax.contour(longitude, latitude, height_var[time_index].squeeze(), contours, colors='k',
transform=data_projection, linewidths=2, zorder=1)
ax.set_title(temp_var.metpy.time[time_index].values)
# add some common geographic features
ax.add_feature(cfeature.COASTLINES)
ax.add_feature(cfeature.STATES, edgecolor='black')
ax.add_feature(cfeature.BORDERS)
# add some lat/lon gridlines
ax.gridlines()
|
Add solutions script for ModelOutput
|
Add solutions script for ModelOutput
|
Python
|
mit
|
julienchastang/unidata-python-workshop,Unidata/unidata-python-workshop,julienchastang/unidata-python-workshop
|
Add solutions script for ModelOutput
|
import numpy as np
# Set up an NCSS query from thredds using siphon
query = ncss.query()
query.accept('netcdf4')
query.variables('Temperature_isobaric', 'Geopotential_height_isobaric')
query.vertical_level(50000)
now = datetime.utcnow()
query.time_range(now, now + timedelta(days=1))
query.lonlat_box(west=-110, east=-45, north=50, south=10)
# Download data using NCSS
data = ncss.get_data(query)
ds = xr.open_dataset(NetCDF4DataStore(data))
temp_var = ds.metpy.parse_cf('Temperature_isobaric')
height_var = ds.metpy.parse_cf('Geopotential_height_isobaric')
longitude = temp_var.metpy.x
latitude = temp_var.metpy.y
time_index = 0
# Plot using CartoPy and Matplotlib
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
contours = np.arange(5000, 6000, 80)
ax.pcolormesh(longitude, latitude, temp_var[time_index].squeeze(),
transform=data_projection, zorder=0)
ax.contour(longitude, latitude, height_var[time_index].squeeze(), contours, colors='k',
transform=data_projection, linewidths=2, zorder=1)
ax.set_title(temp_var.metpy.time[time_index].values)
# add some common geographic features
ax.add_feature(cfeature.COASTLINES)
ax.add_feature(cfeature.STATES, edgecolor='black')
ax.add_feature(cfeature.BORDERS)
# add some lat/lon gridlines
ax.gridlines()
|
<commit_before><commit_msg>Add solutions script for ModelOutput<commit_after>
|
import numpy as np
# Set up an NCSS query from thredds using siphon
query = ncss.query()
query.accept('netcdf4')
query.variables('Temperature_isobaric', 'Geopotential_height_isobaric')
query.vertical_level(50000)
now = datetime.utcnow()
query.time_range(now, now + timedelta(days=1))
query.lonlat_box(west=-110, east=-45, north=50, south=10)
# Download data using NCSS
data = ncss.get_data(query)
ds = xr.open_dataset(NetCDF4DataStore(data))
temp_var = ds.metpy.parse_cf('Temperature_isobaric')
height_var = ds.metpy.parse_cf('Geopotential_height_isobaric')
longitude = temp_var.metpy.x
latitude = temp_var.metpy.y
time_index = 0
# Plot using CartoPy and Matplotlib
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
contours = np.arange(5000, 6000, 80)
ax.pcolormesh(longitude, latitude, temp_var[time_index].squeeze(),
transform=data_projection, zorder=0)
ax.contour(longitude, latitude, height_var[time_index].squeeze(), contours, colors='k',
transform=data_projection, linewidths=2, zorder=1)
ax.set_title(temp_var.metpy.time[time_index].values)
# add some common geographic features
ax.add_feature(cfeature.COASTLINES)
ax.add_feature(cfeature.STATES, edgecolor='black')
ax.add_feature(cfeature.BORDERS)
# add some lat/lon gridlines
ax.gridlines()
|
Add solutions script for ModelOutputimport numpy as np
# Set up an NCSS query from thredds using siphon
query = ncss.query()
query.accept('netcdf4')
query.variables('Temperature_isobaric', 'Geopotential_height_isobaric')
query.vertical_level(50000)
now = datetime.utcnow()
query.time_range(now, now + timedelta(days=1))
query.lonlat_box(west=-110, east=-45, north=50, south=10)
# Download data using NCSS
data = ncss.get_data(query)
ds = xr.open_dataset(NetCDF4DataStore(data))
temp_var = ds.metpy.parse_cf('Temperature_isobaric')
height_var = ds.metpy.parse_cf('Geopotential_height_isobaric')
longitude = temp_var.metpy.x
latitude = temp_var.metpy.y
time_index = 0
# Plot using CartoPy and Matplotlib
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
contours = np.arange(5000, 6000, 80)
ax.pcolormesh(longitude, latitude, temp_var[time_index].squeeze(),
transform=data_projection, zorder=0)
ax.contour(longitude, latitude, height_var[time_index].squeeze(), contours, colors='k',
transform=data_projection, linewidths=2, zorder=1)
ax.set_title(temp_var.metpy.time[time_index].values)
# add some common geographic features
ax.add_feature(cfeature.COASTLINES)
ax.add_feature(cfeature.STATES, edgecolor='black')
ax.add_feature(cfeature.BORDERS)
# add some lat/lon gridlines
ax.gridlines()
|
<commit_before><commit_msg>Add solutions script for ModelOutput<commit_after>import numpy as np
# Set up an NCSS query from thredds using siphon
query = ncss.query()
query.accept('netcdf4')
query.variables('Temperature_isobaric', 'Geopotential_height_isobaric')
query.vertical_level(50000)
now = datetime.utcnow()
query.time_range(now, now + timedelta(days=1))
query.lonlat_box(west=-110, east=-45, north=50, south=10)
# Download data using NCSS
data = ncss.get_data(query)
ds = xr.open_dataset(NetCDF4DataStore(data))
temp_var = ds.metpy.parse_cf('Temperature_isobaric')
height_var = ds.metpy.parse_cf('Geopotential_height_isobaric')
longitude = temp_var.metpy.x
latitude = temp_var.metpy.y
time_index = 0
# Plot using CartoPy and Matplotlib
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.LambertConformal())
contours = np.arange(5000, 6000, 80)
ax.pcolormesh(longitude, latitude, temp_var[time_index].squeeze(),
transform=data_projection, zorder=0)
ax.contour(longitude, latitude, height_var[time_index].squeeze(), contours, colors='k',
transform=data_projection, linewidths=2, zorder=1)
ax.set_title(temp_var.metpy.time[time_index].values)
# add some common geographic features
ax.add_feature(cfeature.COASTLINES)
ax.add_feature(cfeature.STATES, edgecolor='black')
ax.add_feature(cfeature.BORDERS)
# add some lat/lon gridlines
ax.gridlines()
|
|
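The model-output solution above is a notebook cell, so it leans on objects built in earlier cells (ncss, datetime/timedelta, xr, NetCDF4DataStore, ccrs, cfeature, plt, data_projection, and the MetPy xarray accessor). A plausible preamble that would make it runnable on its own is sketched below; the THREDDS endpoint and the choice of PlateCarree for data_projection are assumptions, not part of the original exercise.

from datetime import datetime, timedelta

import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import metpy.xarray  # noqa: F401  (registers the .metpy accessor used by parse_cf)
import xarray as xr
from siphon.ncss import NCSS
from xarray.backends import NetCDF4DataStore

# Illustrative NCSS endpoint for a GFS dataset; the workshop may point elsewhere.
ncss = NCSS('http://thredds.ucar.edu/thredds/ncss/grib/NCEP/GFS/Global_0p5deg/Best')

# GFS fields are on a regular lat/lon grid, so PlateCarree is a reasonable guess.
data_projection = ccrs.PlateCarree()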
aca9fde589adba08fe335b2441c73e3f3769cf67
|
paasta_tools/contrib/shared_ip_check.py
|
paasta_tools/contrib/shared_ip_check.py
|
import sys
from collections import defaultdict
from paasta_tools.utils import get_docker_client
def main():
docker_client = get_docker_client()
ip_to_containers = defaultdict(list)
for container in docker_client.containers():
networks = container['NetworkSettings']['Networks']
if 'bridge' in networks:
ip = networks['bridge']['IPAddress']
if ip:
ip_to_containers[ip].append(container)
output = []
for ip, containers in ip_to_containers.items():
if len(containers) > 1:
output.append('{} shared by the following containers:'.format(ip))
for container in containers:
output.append(' Image: {}'.format(container['Image']))
output.append(' ID: {}'.format(container['Id']))
output.append(' State: {}'.format(container['State']))
output.append(' Status: {}'.format(container['Status']))
output.append('')
if output:
print('CRITICAL - There are multiple Docker containers assigned to the same IP.')
print('There should only be one per IP. Choose one to keep and try stopping the others.')
print('\n'.join(output))
return 2
else:
print('OK - No Docker containers sharing an IP on this host.')
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add script to monitor for containers sharing an IP
|
Add script to monitor for containers sharing an IP
|
Python
|
apache-2.0
|
somic/paasta,Yelp/paasta,somic/paasta,Yelp/paasta
|
Add script to monitor for containers sharing an IP
|
import sys
from collections import defaultdict
from paasta_tools.utils import get_docker_client
def main():
docker_client = get_docker_client()
ip_to_containers = defaultdict(list)
for container in docker_client.containers():
networks = container['NetworkSettings']['Networks']
if 'bridge' in networks:
ip = networks['bridge']['IPAddress']
if ip:
ip_to_containers[ip].append(container)
output = []
for ip, containers in ip_to_containers.items():
if len(containers) > 1:
output.append('{} shared by the following containers:'.format(ip))
for container in containers:
output.append(' Image: {}'.format(container['Image']))
output.append(' ID: {}'.format(container['Id']))
output.append(' State: {}'.format(container['State']))
output.append(' Status: {}'.format(container['Status']))
output.append('')
if output:
print('CRITICAL - There are multiple Docker containers assigned to the same IP.')
print('There should only be one per IP. Choose one to keep and try stopping the others.')
print('\n'.join(output))
return 2
else:
print('OK - No Docker containers sharing an IP on this host.')
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script to monitor for containers sharing an IP<commit_after>
|
import sys
from collections import defaultdict
from paasta_tools.utils import get_docker_client
def main():
docker_client = get_docker_client()
ip_to_containers = defaultdict(list)
for container in docker_client.containers():
networks = container['NetworkSettings']['Networks']
if 'bridge' in networks:
ip = networks['bridge']['IPAddress']
if ip:
ip_to_containers[ip].append(container)
output = []
for ip, containers in ip_to_containers.items():
if len(containers) > 1:
output.append('{} shared by the following containers:'.format(ip))
for container in containers:
output.append(' Image: {}'.format(container['Image']))
output.append(' ID: {}'.format(container['Id']))
output.append(' State: {}'.format(container['State']))
output.append(' Status: {}'.format(container['Status']))
output.append('')
if output:
print('CRITICAL - There are multiple Docker containers assigned to the same IP.')
print('There should only be one per IP. Choose one to keep and try stopping the others.')
print('\n'.join(output))
return 2
else:
print('OK - No Docker containers sharing an IP on this host.')
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add script to monitor for containers sharing an IPimport sys
from collections import defaultdict
from paasta_tools.utils import get_docker_client
def main():
docker_client = get_docker_client()
ip_to_containers = defaultdict(list)
for container in docker_client.containers():
networks = container['NetworkSettings']['Networks']
if 'bridge' in networks:
ip = networks['bridge']['IPAddress']
if ip:
ip_to_containers[ip].append(container)
output = []
for ip, containers in ip_to_containers.items():
if len(containers) > 1:
output.append('{} shared by the following containers:'.format(ip))
for container in containers:
output.append(' Image: {}'.format(container['Image']))
output.append(' ID: {}'.format(container['Id']))
output.append(' State: {}'.format(container['State']))
output.append(' Status: {}'.format(container['Status']))
output.append('')
if output:
print('CRITICAL - There are multiple Docker containers assigned to the same IP.')
print('There should only be one per IP. Choose one to keep and try stopping the others.')
print('\n'.join(output))
return 2
else:
print('OK - No Docker containers sharing an IP on this host.')
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script to monitor for containers sharing an IP<commit_after>import sys
from collections import defaultdict
from paasta_tools.utils import get_docker_client
def main():
docker_client = get_docker_client()
ip_to_containers = defaultdict(list)
for container in docker_client.containers():
networks = container['NetworkSettings']['Networks']
if 'bridge' in networks:
ip = networks['bridge']['IPAddress']
if ip:
ip_to_containers[ip].append(container)
output = []
for ip, containers in ip_to_containers.items():
if len(containers) > 1:
output.append('{} shared by the following containers:'.format(ip))
for container in containers:
output.append(' Image: {}'.format(container['Image']))
output.append(' ID: {}'.format(container['Id']))
output.append(' State: {}'.format(container['State']))
output.append(' Status: {}'.format(container['Status']))
output.append('')
if output:
print('CRITICAL - There are multiple Docker containers assigned to the same IP.')
print('There should only be one per IP. Choose one to keep and try stopping the others.')
print('\n'.join(output))
return 2
else:
print('OK - No Docker containers sharing an IP on this host.')
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
671825d63458712ca06dbcd1fe2028d64aa48e7d
|
jarn/mkrelease/tests/test_defaults.py
|
jarn/mkrelease/tests/test_defaults.py
|
import unittest
from jarn.mkrelease.mkrelease import Defaults
from jarn.mkrelease.testing import JailSetup
class DefaultsTests(JailSetup):
def test_defaults_defaults(self):
self.mkfile('my.cfg', """
[mkrelease]
""")
defaults = Defaults('my.cfg')
self.assertEqual(defaults.distbase, '')
self.assertEqual(defaults.distdefault, [])
self.assertEqual(defaults.commit, True)
self.assertEqual(defaults.tag, True)
self.assertEqual(defaults.register, True)
self.assertEqual(defaults.upload, True)
self.assertEqual(defaults.sign, False)
self.assertEqual(defaults.push, False)
self.assertEqual(defaults.develop, False)
self.assertEqual(defaults.quiet, False)
self.assertEqual(defaults.identity, '')
self.assertEqual(defaults.formats, [])
self.assertEqual(defaults.aliases, {})
#self.assertEqual(defaults.servers, {})
def test_read_defaults(self):
self.mkfile('my.cfg', """
[mkrelease]
distbase = bedrock.com:
distdefault = public
commit = false
tag = 0
register = no
upload = off
sign = true
push = 1
develop = yes
quiet = on
identity = fred@bedrock.com
formats = zip wheel
[aliases]
public = bedrock.com:eggs
""")
defaults = Defaults('my.cfg')
self.assertEqual(defaults.distbase, 'bedrock.com:')
self.assertEqual(defaults.distdefault, ['public'])
self.assertEqual(defaults.commit, False)
self.assertEqual(defaults.tag, False)
self.assertEqual(defaults.register, False)
self.assertEqual(defaults.upload, False)
self.assertEqual(defaults.sign, True)
self.assertEqual(defaults.push, True)
self.assertEqual(defaults.develop, True)
self.assertEqual(defaults.quiet, True)
self.assertEqual(defaults.identity, 'fred@bedrock.com')
self.assertEqual(defaults.formats, ['zip', 'wheel'])
self.assertEqual(defaults.aliases, {'public': ['bedrock.com:eggs']})
#self.assertEqual(defaults.servers, {})
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
Add tests for new defaults.
|
Add tests for new defaults.
|
Python
|
bsd-2-clause
|
Jarn/jarn.mkrelease
|
Add tests for new defaults.
|
import unittest
from jarn.mkrelease.mkrelease import Defaults
from jarn.mkrelease.testing import JailSetup
class DefaultsTests(JailSetup):
def test_defaults_defaults(self):
self.mkfile('my.cfg', """
[mkrelease]
""")
defaults = Defaults('my.cfg')
self.assertEqual(defaults.distbase, '')
self.assertEqual(defaults.distdefault, [])
self.assertEqual(defaults.commit, True)
self.assertEqual(defaults.tag, True)
self.assertEqual(defaults.register, True)
self.assertEqual(defaults.upload, True)
self.assertEqual(defaults.sign, False)
self.assertEqual(defaults.push, False)
self.assertEqual(defaults.develop, False)
self.assertEqual(defaults.quiet, False)
self.assertEqual(defaults.identity, '')
self.assertEqual(defaults.formats, [])
self.assertEqual(defaults.aliases, {})
#self.assertEqual(defaults.servers, {})
def test_read_defaults(self):
self.mkfile('my.cfg', """
[mkrelease]
distbase = bedrock.com:
distdefault = public
commit = false
tag = 0
register = no
upload = off
sign = true
push = 1
develop = yes
quiet = on
identity = fred@bedrock.com
formats = zip wheel
[aliases]
public = bedrock.com:eggs
""")
defaults = Defaults('my.cfg')
self.assertEqual(defaults.distbase, 'bedrock.com:')
self.assertEqual(defaults.distdefault, ['public'])
self.assertEqual(defaults.commit, False)
self.assertEqual(defaults.tag, False)
self.assertEqual(defaults.register, False)
self.assertEqual(defaults.upload, False)
self.assertEqual(defaults.sign, True)
self.assertEqual(defaults.push, True)
self.assertEqual(defaults.develop, True)
self.assertEqual(defaults.quiet, True)
self.assertEqual(defaults.identity, 'fred@bedrock.com')
self.assertEqual(defaults.formats, ['zip', 'wheel'])
self.assertEqual(defaults.aliases, {'public': ['bedrock.com:eggs']})
#self.assertEqual(defaults.servers, {})
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
<commit_before><commit_msg>Add tests for new defaults.<commit_after>
|
import unittest
from jarn.mkrelease.mkrelease import Defaults
from jarn.mkrelease.testing import JailSetup
class DefaultsTests(JailSetup):
def test_defaults_defaults(self):
self.mkfile('my.cfg', """
[mkrelease]
""")
defaults = Defaults('my.cfg')
self.assertEqual(defaults.distbase, '')
self.assertEqual(defaults.distdefault, [])
self.assertEqual(defaults.commit, True)
self.assertEqual(defaults.tag, True)
self.assertEqual(defaults.register, True)
self.assertEqual(defaults.upload, True)
self.assertEqual(defaults.sign, False)
self.assertEqual(defaults.push, False)
self.assertEqual(defaults.develop, False)
self.assertEqual(defaults.quiet, False)
self.assertEqual(defaults.identity, '')
self.assertEqual(defaults.formats, [])
self.assertEqual(defaults.aliases, {})
#self.assertEqual(defaults.servers, {})
def test_read_defaults(self):
self.mkfile('my.cfg', """
[mkrelease]
distbase = bedrock.com:
distdefault = public
commit = false
tag = 0
register = no
upload = off
sign = true
push = 1
develop = yes
quiet = on
identity = fred@bedrock.com
formats = zip wheel
[aliases]
public = bedrock.com:eggs
""")
defaults = Defaults('my.cfg')
self.assertEqual(defaults.distbase, 'bedrock.com:')
self.assertEqual(defaults.distdefault, ['public'])
self.assertEqual(defaults.commit, False)
self.assertEqual(defaults.tag, False)
self.assertEqual(defaults.register, False)
self.assertEqual(defaults.upload, False)
self.assertEqual(defaults.sign, True)
self.assertEqual(defaults.push, True)
self.assertEqual(defaults.develop, True)
self.assertEqual(defaults.quiet, True)
self.assertEqual(defaults.identity, 'fred@bedrock.com')
self.assertEqual(defaults.formats, ['zip', 'wheel'])
self.assertEqual(defaults.aliases, {'public': ['bedrock.com:eggs']})
#self.assertEqual(defaults.servers, {})
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
Add tests for new defaults.import unittest
from jarn.mkrelease.mkrelease import Defaults
from jarn.mkrelease.testing import JailSetup
class DefaultsTests(JailSetup):
def test_defaults_defaults(self):
self.mkfile('my.cfg', """
[mkrelease]
""")
defaults = Defaults('my.cfg')
self.assertEqual(defaults.distbase, '')
self.assertEqual(defaults.distdefault, [])
self.assertEqual(defaults.commit, True)
self.assertEqual(defaults.tag, True)
self.assertEqual(defaults.register, True)
self.assertEqual(defaults.upload, True)
self.assertEqual(defaults.sign, False)
self.assertEqual(defaults.push, False)
self.assertEqual(defaults.develop, False)
self.assertEqual(defaults.quiet, False)
self.assertEqual(defaults.identity, '')
self.assertEqual(defaults.formats, [])
self.assertEqual(defaults.aliases, {})
#self.assertEqual(defaults.servers, {})
def test_read_defaults(self):
self.mkfile('my.cfg', """
[mkrelease]
distbase = bedrock.com:
distdefault = public
commit = false
tag = 0
register = no
upload = off
sign = true
push = 1
develop = yes
quiet = on
identity = fred@bedrock.com
formats = zip wheel
[aliases]
public = bedrock.com:eggs
""")
defaults = Defaults('my.cfg')
self.assertEqual(defaults.distbase, 'bedrock.com:')
self.assertEqual(defaults.distdefault, ['public'])
self.assertEqual(defaults.commit, False)
self.assertEqual(defaults.tag, False)
self.assertEqual(defaults.register, False)
self.assertEqual(defaults.upload, False)
self.assertEqual(defaults.sign, True)
self.assertEqual(defaults.push, True)
self.assertEqual(defaults.develop, True)
self.assertEqual(defaults.quiet, True)
self.assertEqual(defaults.identity, 'fred@bedrock.com')
self.assertEqual(defaults.formats, ['zip', 'wheel'])
self.assertEqual(defaults.aliases, {'public': ['bedrock.com:eggs']})
#self.assertEqual(defaults.servers, {})
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
<commit_before><commit_msg>Add tests for new defaults.<commit_after>import unittest
from jarn.mkrelease.mkrelease import Defaults
from jarn.mkrelease.testing import JailSetup
class DefaultsTests(JailSetup):
def test_defaults_defaults(self):
self.mkfile('my.cfg', """
[mkrelease]
""")
defaults = Defaults('my.cfg')
self.assertEqual(defaults.distbase, '')
self.assertEqual(defaults.distdefault, [])
self.assertEqual(defaults.commit, True)
self.assertEqual(defaults.tag, True)
self.assertEqual(defaults.register, True)
self.assertEqual(defaults.upload, True)
self.assertEqual(defaults.sign, False)
self.assertEqual(defaults.push, False)
self.assertEqual(defaults.develop, False)
self.assertEqual(defaults.quiet, False)
self.assertEqual(defaults.identity, '')
self.assertEqual(defaults.formats, [])
self.assertEqual(defaults.aliases, {})
#self.assertEqual(defaults.servers, {})
def test_read_defaults(self):
self.mkfile('my.cfg', """
[mkrelease]
distbase = bedrock.com:
distdefault = public
commit = false
tag = 0
register = no
upload = off
sign = true
push = 1
develop = yes
quiet = on
identity = fred@bedrock.com
formats = zip wheel
[aliases]
public = bedrock.com:eggs
""")
defaults = Defaults('my.cfg')
self.assertEqual(defaults.distbase, 'bedrock.com:')
self.assertEqual(defaults.distdefault, ['public'])
self.assertEqual(defaults.commit, False)
self.assertEqual(defaults.tag, False)
self.assertEqual(defaults.register, False)
self.assertEqual(defaults.upload, False)
self.assertEqual(defaults.sign, True)
self.assertEqual(defaults.push, True)
self.assertEqual(defaults.develop, True)
self.assertEqual(defaults.quiet, True)
self.assertEqual(defaults.identity, 'fred@bedrock.com')
self.assertEqual(defaults.formats, ['zip', 'wheel'])
self.assertEqual(defaults.aliases, {'public': ['bedrock.com:eggs']})
#self.assertEqual(defaults.servers, {})
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
|
d65182338acfc58bc4b87c1b9c506dd2b2c80a38
|
project2/stepwise_forward_selection.py
|
project2/stepwise_forward_selection.py
|
""" Created by Max Robinson 9/20/2017 """
class SFS:
pass
"""
Pseudo code
function SFS(Features, D_train, D_valid, Learn()):
F_0 = <>
basePerf = -inf
do:
bestPerf = - inf
for all Features in FeatureSpace do:
F_0 = F_0 + F
h = Learn(F_0, D_train)
currPerf = Perf(h, D_valid)
if currPerf > bestPerf then:
bestPerf = currPerf
bestF = F
end if
F_0 = F_0 - F
end for
if bestPerf > basePerf then
basePerf = bestPerf
F = F - bestF
F_0 = F_0 + bestF
else
exit (Break)
end if
until F = <> (is empty)
return F_0
"""
|
Add skeleton for SFS algorithm
|
Add skeleton for SFS algorithm
|
Python
|
apache-2.0
|
MaxRobinson/CS449,MaxRobinson/CS449,MaxRobinson/CS449
|
Add skeleton for SFS algorithm
|
""" Created by Max Robinson 9/20/2017 """
class SFS:
pass
"""
Pseudo code
function SFS(Features, D_train, D_valid, Learn()):
F_0 = <>
basePerf = -inf
do:
bestPerf = - inf
for all Features in FeatureSpace do:
F_0 = F_0 + F
h = Learn(F_0, D_train)
currPerf = Perf(h, D_valid)
if currPerf > bestPerf then:
bestPerf = currPerf
bestF = F
end if
F_0 = F_0 - F
end for
if bestPerf > basePerf then
basePerf = bestPerf
F = F - bestF
F_0 = F_0 + bestF
else
exit (Break)
end if
until F = <> (is empty)
return F_0
"""
|
<commit_before><commit_msg>Add skeleton for SFS algorithm<commit_after>
|
""" Created by Max Robinson 9/20/2017 """
class SFS:
pass
"""
Pseudo code
function SFS(Features, D_train, D_valid, Learn()):
F_0 = <>
basePerf = -inf
do:
bestPerf = - inf
for all Features in FeatureSpace do:
F_0 = F_0 + F
h = Learn(F_0, D_train)
currPerf = Perf(h, D_valid)
if currPerf > bestPerf then:
bestPerf = currPerf
bestF = F
end if
F_0 = F_0 - F
end for
if bestPerf > basePerf then
basePerf = bestPerf
F = F - bestF
F_0 = F_0 + bestF
else
exit (Break)
end if
until F = <> (is empty)
return F_0
"""
|
Add skeleton for SFS algorithm""" Created by Max Robinson 9/20/2017 """
class SFS:
pass
"""
Pseudo code
function SFS(Features, D_train, D_valid, Learn()):
F_0 = <>
basePerf = -inf
do:
bestPerf = - inf
for all Features in FeatureSpace do:
F_0 = F_0 + F
h = Learn(F_0, D_train)
currPerf = Perf(h, D_valid)
if currPerf > bestPerf then:
bestPerf = currPerf
bestF = F
end if
F_0 = F_0 - F
end for
if bestPerf > basePerf then
basePerf = bestPerf
F = F - bestF
F_0 = F_0 + bestF
else
exit (Break)
end if
until F = <> (is empty)
return F_0
"""
|
<commit_before><commit_msg>Add skeleton for SFS algorithm<commit_after>""" Created by Max Robinson 9/20/2017 """
class SFS:
pass
"""
Pseudo code
function SFS(Features, D_train, D_valid, Learn()):
F_0 = <>
basePerf = -inf
do:
bestPerf = - inf
for all Features in FeatureSpace do:
F_0 = F_0 + F
h = Learn(F_0, D_train)
currPerf = Perf(h, D_valid)
if currPerf > bestPerf then:
bestPerf = currPerf
bestF = F
end if
F_0 = F_0 - F
end for
if bestPerf > basePerf then
basePerf = bestPerf
F = F - bestF
F_0 = F_0 + bestF
else
exit (Break)
end if
until F = <> (is empty)
return F_0
"""
|
|
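A runnable Python rendering of that SFS pseudocode could look like the sketch below; the learn/perf callables and their signatures are placeholders for whatever learner and validation metric the project ends up plugging in.

import math

def sfs(features, d_train, d_valid, learn, perf):
    """Greedy stepwise forward selection over the given feature list."""
    remaining = list(features)
    selected = []            # F_0 in the pseudocode
    base_perf = -math.inf
    while remaining:
        best_perf = -math.inf
        best_feature = None
        for feature in remaining:
            candidate = selected + [feature]
            hypothesis = learn(candidate, d_train)
            current_perf = perf(hypothesis, d_valid)
            if current_perf > best_perf:
                best_perf = current_perf
                best_feature = feature
        if best_perf > base_perf:
            base_perf = best_perf
            remaining.remove(best_feature)
            selected.append(best_feature)
        else:
            break            # no single remaining feature improves performance
    return selected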
b365e5132a819e357bd960721fd7506fe538bbbf
|
corehq/apps/es/fake/forms_fake.py
|
corehq/apps/es/fake/forms_fake.py
|
from copy import deepcopy
from dateutil import parser
from corehq.pillows.xform import transform_xform_for_elasticsearch
from corehq.apps.es.fake.es_query_fake import HQESQueryFake
class FormESFake(HQESQueryFake):
_all_docs = []
def domain(self, domain):
return self._filtered(
lambda doc: (doc.get('domain') == domain
or domain in doc.get('domains', [])))
def xmlns(self, xmlns):
return self.term('xmlns.exact', xmlns)
def completed(self, gt=None, gte=None, lt=None, lte=None):
return self.date_range('form.meta.timeEnd', gt, gte, lt, lte)
@staticmethod
def transform_doc(doc):
doc = deepcopy(doc)
doc['xmlns.exact'] = doc.get('xmlns', '')
doc['form.meta.timeEnd'] = parser.parse(doc['form']['meta']['timeEnd'])
return transform_xform_for_elasticsearch(doc)
def count(self):
return len(self._result_docs)
|
Add a fake es forms orm
|
Add a fake es forms orm
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add a fake es forms orm
|
from copy import deepcopy
from dateutil import parser
from corehq.pillows.xform import transform_xform_for_elasticsearch
from corehq.apps.es.fake.es_query_fake import HQESQueryFake
class FormESFake(HQESQueryFake):
_all_docs = []
def domain(self, domain):
return self._filtered(
lambda doc: (doc.get('domain') == domain
or domain in doc.get('domains', [])))
def xmlns(self, xmlns):
return self.term('xmlns.exact', xmlns)
def completed(self, gt=None, gte=None, lt=None, lte=None):
return self.date_range('form.meta.timeEnd', gt, gte, lt, lte)
@staticmethod
def transform_doc(doc):
doc = deepcopy(doc)
doc['xmlns.exact'] = doc.get('xmlns', '')
doc['form.meta.timeEnd'] = parser.parse(doc['form']['meta']['timeEnd'])
return transform_xform_for_elasticsearch(doc)
def count(self):
return len(self._result_docs)
|
<commit_before><commit_msg>Add a fake es forms orm<commit_after>
|
from copy import deepcopy
from dateutil import parser
from corehq.pillows.xform import transform_xform_for_elasticsearch
from corehq.apps.es.fake.es_query_fake import HQESQueryFake
class FormESFake(HQESQueryFake):
_all_docs = []
def domain(self, domain):
return self._filtered(
lambda doc: (doc.get('domain') == domain
or domain in doc.get('domains', [])))
def xmlns(self, xmlns):
return self.term('xmlns.exact', xmlns)
def completed(self, gt=None, gte=None, lt=None, lte=None):
return self.date_range('form.meta.timeEnd', gt, gte, lt, lte)
@staticmethod
def transform_doc(doc):
doc = deepcopy(doc)
doc['xmlns.exact'] = doc.get('xmlns', '')
doc['form.meta.timeEnd'] = parser.parse(doc['form']['meta']['timeEnd'])
return transform_xform_for_elasticsearch(doc)
def count(self):
return len(self._result_docs)
|
Add a fake es forms ormfrom copy import deepcopy
from dateutil import parser
from corehq.pillows.xform import transform_xform_for_elasticsearch
from corehq.apps.es.fake.es_query_fake import HQESQueryFake
class FormESFake(HQESQueryFake):
_all_docs = []
def domain(self, domain):
return self._filtered(
lambda doc: (doc.get('domain') == domain
or domain in doc.get('domains', [])))
def xmlns(self, xmlns):
return self.term('xmlns.exact', xmlns)
def completed(self, gt=None, gte=None, lt=None, lte=None):
return self.date_range('form.meta.timeEnd', gt, gte, lt, lte)
@staticmethod
def transform_doc(doc):
doc = deepcopy(doc)
doc['xmlns.exact'] = doc.get('xmlns', '')
doc['form.meta.timeEnd'] = parser.parse(doc['form']['meta']['timeEnd'])
return transform_xform_for_elasticsearch(doc)
def count(self):
return len(self._result_docs)
|
<commit_before><commit_msg>Add a fake es forms orm<commit_after>from copy import deepcopy
from dateutil import parser
from corehq.pillows.xform import transform_xform_for_elasticsearch
from corehq.apps.es.fake.es_query_fake import HQESQueryFake
class FormESFake(HQESQueryFake):
_all_docs = []
def domain(self, domain):
return self._filtered(
lambda doc: (doc.get('domain') == domain
or domain in doc.get('domains', [])))
def xmlns(self, xmlns):
return self.term('xmlns.exact', xmlns)
def completed(self, gt=None, gte=None, lt=None, lte=None):
return self.date_range('form.meta.timeEnd', gt, gte, lt, lte)
@staticmethod
def transform_doc(doc):
doc = deepcopy(doc)
doc['xmlns.exact'] = doc.get('xmlns', '')
doc['form.meta.timeEnd'] = parser.parse(doc['form']['meta']['timeEnd'])
return transform_xform_for_elasticsearch(doc)
def count(self):
return len(self._result_docs)
|
|
c179aaeacd8d2b9c78831e6e3cce965b2eb58210
|
spectral_cube/tests/test_cube_utils.py
|
spectral_cube/tests/test_cube_utils.py
|
import pytest
import numpy as np
from astropy import units as u
from astropy import convolution
from astropy.wcs import WCS
from astropy import wcs
from .test_spectral_cube import cube_and_raw
from ..cube_utils import largest_beam, smallest_beam
try:
from radio_beam import beam, Beam
RADIO_BEAM_INSTALLED = True
except ImportError:
RADIO_BEAM_INSTALLED = False
try:
import reproject
REPROJECT_INSTALLED = True
except ImportError:
REPROJECT_INSTALLED = False
@pytest.mark.skipif('not RADIO_BEAM_INSTALLED')
def test_largest_beam():
cube, data = cube_and_raw('522_delta_beams.fits')
large_beam = largest_beam(cube.beams)
assert large_beam == cube.beams[2]
@pytest.mark.skipif('not RADIO_BEAM_INSTALLED')
def test_smallest_beam():
cube, data = cube_and_raw('522_delta_beams.fits')
small_beam = smallest_beam(cube.beams)
assert small_beam == cube.beams[0]
|
Add tests for largest and smallest beams
|
Add tests for largest and smallest beams
|
Python
|
bsd-3-clause
|
jzuhone/spectral-cube,radio-astro-tools/spectral-cube,keflavich/spectral-cube,low-sky/spectral-cube,e-koch/spectral-cube
|
Add tests for largest and smallest beams
|
import pytest
import numpy as np
from astropy import units as u
from astropy import convolution
from astropy.wcs import WCS
from astropy import wcs
from .test_spectral_cube import cube_and_raw
from ..cube_utils import largest_beam, smallest_beam
try:
from radio_beam import beam, Beam
RADIO_BEAM_INSTALLED = True
except ImportError:
RADIO_BEAM_INSTALLED = False
try:
import reproject
REPROJECT_INSTALLED = True
except ImportError:
REPROJECT_INSTALLED = False
@pytest.mark.skipif('not RADIO_BEAM_INSTALLED')
def test_largest_beam():
cube, data = cube_and_raw('522_delta_beams.fits')
large_beam = largest_beam(cube.beams)
assert large_beam == cube.beams[2]
@pytest.mark.skipif('not RADIO_BEAM_INSTALLED')
def test_smallest_beam():
cube, data = cube_and_raw('522_delta_beams.fits')
small_beam = smallest_beam(cube.beams)
assert small_beam == cube.beams[0]
|
<commit_before><commit_msg>Add tests for largest and smallest beams<commit_after>
|
import pytest
import numpy as np
from astropy import units as u
from astropy import convolution
from astropy.wcs import WCS
from astropy import wcs
from .test_spectral_cube import cube_and_raw
from ..cube_utils import largest_beam, smallest_beam
try:
from radio_beam import beam, Beam
RADIO_BEAM_INSTALLED = True
except ImportError:
RADIO_BEAM_INSTALLED = False
try:
import reproject
REPROJECT_INSTALLED = True
except ImportError:
REPROJECT_INSTALLED = False
@pytest.mark.skipif('not RADIO_BEAM_INSTALLED')
def test_largest_beam():
cube, data = cube_and_raw('522_delta_beams.fits')
large_beam = largest_beam(cube.beams)
assert large_beam == cube.beams[2]
@pytest.mark.skipif('not RADIO_BEAM_INSTALLED')
def test_smallest_beam():
cube, data = cube_and_raw('522_delta_beams.fits')
small_beam = smallest_beam(cube.beams)
assert small_beam == cube.beams[0]
|
Add tests for largest and smallest beamsimport pytest
import numpy as np
from astropy import units as u
from astropy import convolution
from astropy.wcs import WCS
from astropy import wcs
from .test_spectral_cube import cube_and_raw
from ..cube_utils import largest_beam, smallest_beam
try:
from radio_beam import beam, Beam
RADIO_BEAM_INSTALLED = True
except ImportError:
RADIO_BEAM_INSTALLED = False
try:
import reproject
REPROJECT_INSTALLED = True
except ImportError:
REPROJECT_INSTALLED = False
@pytest.mark.skipif('not RADIO_BEAM_INSTALLED')
def test_largest_beam():
cube, data = cube_and_raw('522_delta_beams.fits')
large_beam = largest_beam(cube.beams)
assert large_beam == cube.beams[2]
@pytest.mark.skipif('not RADIO_BEAM_INSTALLED')
def test_smallest_beam():
cube, data = cube_and_raw('522_delta_beams.fits')
small_beam = smallest_beam(cube.beams)
assert small_beam == cube.beams[0]
|
<commit_before><commit_msg>Add tests for largest and smallest beams<commit_after>import pytest
import numpy as np
from astropy import units as u
from astropy import convolution
from astropy.wcs import WCS
from astropy import wcs
from .test_spectral_cube import cube_and_raw
from ..cube_utils import largest_beam, smallest_beam
try:
from radio_beam import beam, Beam
RADIO_BEAM_INSTALLED = True
except ImportError:
RADIO_BEAM_INSTALLED = False
try:
import reproject
REPROJECT_INSTALLED = True
except ImportError:
REPROJECT_INSTALLED = False
@pytest.mark.skipif('not RADIO_BEAM_INSTALLED')
def test_largest_beam():
cube, data = cube_and_raw('522_delta_beams.fits')
large_beam = largest_beam(cube.beams)
assert large_beam == cube.beams[2]
@pytest.mark.skipif('not RADIO_BEAM_INSTALLED')
def test_smallest_beam():
cube, data = cube_and_raw('522_delta_beams.fits')
small_beam = smallest_beam(cube.beams)
assert small_beam == cube.beams[0]
|
|
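For context, the largest_beam and smallest_beam helpers exercised above plausibly just compare beams by solid angle; assuming radio_beam.Beam exposes that as the .sr attribute, a minimal sketch would be:

def largest_beam(beams):
    # beam with the largest solid angle
    return max(beams, key=lambda beam: beam.sr)

def smallest_beam(beams):
    # beam with the smallest solid angle
    return min(beams, key=lambda beam: beam.sr)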
0c53edae37f87c8ef7cd46f5f9ecbf7c3ab3612d
|
scripts/cache_manager.py
|
scripts/cache_manager.py
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import os
import cPickle
class CacheManager(object):
"""Manage cache saving and loading for a project"""
def __init__(self, home, tag):
"""Initialize the CacheManager
Args:
home: The home where data is to be cached
tag: The tag of the project
"""
super(CacheManager, self).__init__()
self.home = home
self.tag = tag
def load(self, task):
"""Load from cache if data file exists
Args:
task: The task name
Returns ``None`` if no data file exists. Otherwise, the data will be
returned.
"""
try:
fpath = os.path.join(self.home, self.tag + '_' + task + '.pkl')
with open(fpath, 'rb') as f:
data = cPickle.load(f)
except IOError:
data = None
return data
def save(self, task):
"""Save to cache
This is a decorator function that decorates on any function. Its return
value will be saved and then returned again.
Args:
task: The task name
Returns the output of decorated function.
"""
fpath = os.path.join(self.home, self.tag + '_' + task + '.pkl')
def decorator(func):
def wrapper(*args, **kwargs):
data = func(*args, **kwargs)
with open(fpath, 'wb') as f:
cPickle.dump(data, f, protocol=cPickle.HIGHEST_PROTOCOL)
return data
return wrapper
return decorator
|
Manage cache in an elegant way
|
Manage cache in an elegant way
|
Python
|
mit
|
Cysu/Person-Reid,Cysu/Person-Reid,Cysu/Person-Reid,Cysu/Person-Reid,Cysu/Person-Reid
|
Manage cache in an elegant way
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import os
import cPickle
class CacheManager(object):
"""Manage cache saving and loading for a project"""
def __init__(self, home, tag):
"""Initialize the CacheManager
Args:
home: The home where data is to be cached
tag: The tag of the project
"""
super(CacheManager, self).__init__()
self.home = home
self.tag = tag
def load(self, task):
"""Load from cache if data file exists
Args:
task: The task name
Returns ``None`` if no data file exists. Otherwise, the data will be
returned.
"""
try:
fpath = os.path.join(self.home, self.tag + '_' + task + '.pkl')
with open(fpath, 'rb') as f:
data = cPickle.load(f)
except IOError:
data = None
return data
def save(self, task):
"""Save to cache
This is a decorator function that decorates on any function. Its return
value will be saved and then returned again.
Args:
task: The task name
Returns the output of decorated function.
"""
fpath = os.path.join(self.home, self.tag + '_' + task + '.pkl')
def decorator(func):
def wrapper(*args, **kwargs):
data = func(*args, **kwargs)
with open(fpath, 'wb') as f:
cPickle.dump(data, f, protocol=cPickle.HIGHEST_PROTOCOL)
return data
return wrapper
return decorator
|
<commit_before><commit_msg>Manage cache in an elegant way<commit_after>
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import os
import cPickle
class CacheManager(object):
"""Manage cache saving and loading for a project"""
def __init__(self, home, tag):
"""Initialize the CacheManager
Args:
home: The home where data is to be cached
tag: The tag of the project
"""
super(CacheManager, self).__init__()
self.home = home
self.tag = tag
def load(self, task):
"""Load from cache if data file exists
Args:
task: The task name
Returns ``None`` if no data file exists. Otherwise, the data will be
returned.
"""
try:
fpath = os.path.join(self.home, self.tag + '_' + task + '.pkl')
with open(fpath, 'rb') as f:
data = cPickle.load(f)
except IOError:
data = None
return data
def save(self, task):
"""Save to cache
This is a decorator function that decorates on any function. Its return
value will be saved and then returned again.
Args:
task: The task name
Returns the output of decorated function.
"""
fpath = os.path.join(self.home, self.tag + '_' + task + '.pkl')
def decorator(func):
def wrapper(*args, **kwargs):
data = func(*args, **kwargs)
with open(fpath, 'wb') as f:
cPickle.dump(data, f, protocol=cPickle.HIGHEST_PROTOCOL)
return data
return wrapper
return decorator
|
Manage cache in an elegant way#!/usr/bin/python2
# -*- coding: utf-8 -*-
import os
import cPickle
class CacheManager(object):
"""Manage cache saving and loading for a project"""
def __init__(self, home, tag):
"""Initialize the CacheManager
Args:
home: The home where data is to be cached
tag: The tag of the project
"""
super(CacheManager, self).__init__()
self.home = home
self.tag = tag
def load(self, task):
"""Load from cache if data file exists
Args:
task: The task name
Returns ``None`` if no data file exists. Otherwise, the data will be
returned.
"""
try:
fpath = os.path.join(self.home, self.tag + '_' + task + '.pkl')
with open(fpath, 'rb') as f:
data = cPickle.load(f)
except IOError:
data = None
return data
def save(self, task):
"""Save to cache
This is a decorator function that decorates on any function. Its return
value will be saved and then returned again.
Args:
task: The task name
Returns the output of decorated function.
"""
fpath = os.path.join(self.home, self.tag + '_' + task + '.pkl')
def decorator(func):
def wrapper(*args, **kwargs):
data = func(*args, **kwargs)
with open(fpath, 'wb') as f:
cPickle.dump(data, f, protocol=cPickle.HIGHEST_PROTOCOL)
return data
return wrapper
return decorator
|
<commit_before><commit_msg>Manage cache in an elegant way<commit_after>#!/usr/bin/python2
# -*- coding: utf-8 -*-
import os
import cPickle
class CacheManager(object):
"""Manage cache saving and loading for a project"""
def __init__(self, home, tag):
"""Initialize the CacheManager
Args:
home: The home where data is to be cached
tag: The tag of the project
"""
super(CacheManager, self).__init__()
self.home = home
self.tag = tag
def load(self, task):
"""Load from cache if data file exists
Args:
task: The task name
Returns ``None`` if no data file exists. Otherwise, the data will be
returned.
"""
try:
fpath = os.path.join(self.home, self.tag + '_' + task + '.pkl')
with open(fpath, 'rb') as f:
data = cPickle.load(f)
except IOError:
data = None
return data
def save(self, task):
"""Save to cache
This is a decorator function that decorates on any function. Its return
value will be saved and then returned again.
Args:
task: The task name
Returns the output of decorated function.
"""
fpath = os.path.join(self.home, self.tag + '_' + task + '.pkl')
def decorator(func):
def wrapper(*args, **kwargs):
data = func(*args, **kwargs)
with open(fpath, 'wb') as f:
cPickle.dump(data, f, protocol=cPickle.HIGHEST_PROTOCOL)
return data
return wrapper
return decorator
|
|
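Typical usage of the CacheManager above looks roughly like the sketch below; the cache directory, project tag and task name are invented for illustration.

import os

cache_home = os.path.expanduser('~/.reid_cache')
if not os.path.exists(cache_home):
    os.makedirs(cache_home)
manager = CacheManager(cache_home, 'cuhk')

@manager.save('features')
def compute_features():
    # stand-in for an expensive computation
    return [[0.1, 0.2], [0.3, 0.4]]

features = manager.load('features')
if features is None:
    # first run: computes the data and writes cuhk_features.pkl into the cache home
    features = compute_features()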
be10f2588f128d1a15dd6121aa806a98f1f96bf1
|
tests/test_result.py
|
tests/test_result.py
|
import unittest
from performance_testing.result import Result, File
import os
import shutil
class ResultTestCase(unittest.TestCase):
def setUp(self):
self.current_directory = os.path.dirname(os.path.abspath(__file__))
self.result_directory = os.path.join(self.current_directory, 'assets/test_result')
def test_result_init(self):
if os.path.exists(self.result_directory):
shutil.rmtree(self.result_directory)
self.assertFalse(os.path.exists(self.result_directory))
result = Result(directory=self.result_directory)
self.assertTrue(os.path.exists(self.result_directory))
self.assertTrue(os.path.exists(result.file.path))
|
Create tests for result class
|
Create tests for result class
|
Python
|
mit
|
BakeCode/performance-testing,BakeCode/performance-testing
|
Create tests for result class
|
import unittest
from performance_testing.result import Result, File
import os
import shutil
class ResultTestCase(unittest.TestCase):
def setUp(self):
self.current_directory = os.path.dirname(os.path.abspath(__file__))
self.result_directory = os.path.join(self.current_directory, 'assets/test_result')
def test_result_init(self):
if os.path.exists(self.result_directory):
shutil.rmtree(self.result_directory)
self.assertFalse(os.path.exists(self.result_directory))
result = Result(directory=self.result_directory)
self.assertTrue(os.path.exists(self.result_directory))
self.assertTrue(os.path.exists(result.file.path))
|
<commit_before><commit_msg>Create tests for result class<commit_after>
|
import unittest
from performance_testing.result import Result, File
import os
import shutil
class ResultTestCase(unittest.TestCase):
def setUp(self):
self.current_directory = os.path.dirname(os.path.abspath(__file__))
self.result_directory = os.path.join(self.current_directory, 'assets/test_result')
def test_result_init(self):
if os.path.exists(self.result_directory):
shutil.rmtree(self.result_directory)
self.assertFalse(os.path.exists(self.result_directory))
result = Result(directory=self.result_directory)
self.assertTrue(os.path.exists(self.result_directory))
self.assertTrue(os.path.exists(result.file.path))
|
Create tests for result classimport unittest
from performance_testing.result import Result, File
import os
import shutil
class ResultTestCase(unittest.TestCase):
def setUp(self):
self.current_directory = os.path.dirname(os.path.abspath(__file__))
self.result_directory = os.path.join(self.current_directory, 'assets/test_result')
def test_result_init(self):
if os.path.exists(self.result_directory):
shutil.rmtree(self.result_directory)
self.assertFalse(os.path.exists(self.result_directory))
result = Result(directory=self.result_directory)
self.assertTrue(os.path.exists(self.result_directory))
self.assertTrue(os.path.exists(result.file.path))
|
<commit_before><commit_msg>Create tests for result class<commit_after>import unittest
from performance_testing.result import Result, File
import os
import shutil
class ResultTestCase(unittest.TestCase):
def setUp(self):
self.current_directory = os.path.dirname(os.path.abspath(__file__))
self.result_directory = os.path.join(self.current_directory, 'assets/test_result')
def test_result_init(self):
if os.path.exists(self.result_directory):
shutil.rmtree(self.result_directory)
self.assertFalse(os.path.exists(self.result_directory))
result = Result(directory=self.result_directory)
self.assertTrue(os.path.exists(self.result_directory))
self.assertTrue(os.path.exists(result.file.path))
|
|
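The test above only pins down a small surface of the result module: Result(directory=...) must create the directory and expose a file whose path exists on disk. One minimal sketch satisfying just that contract (not the project's actual implementation) would be:

import datetime
import os

class File(object):
    def __init__(self, directory):
        name = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
        self.path = os.path.join(directory, name)
        open(self.path, 'a').close()  # touch the result file

class Result(object):
    def __init__(self, directory):
        if not os.path.exists(directory):
            os.makedirs(directory)
        self.file = File(directory)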
7f8285b8f92b217d00872dbd1a527d25768fa21c
|
apps/submission/tests/test_views.py
|
apps/submission/tests/test_views.py
|
from django.test import TestCase
from django.urls import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
class DownloadXLSXTemplateViewTestCase(TestCase):
def setUp(self):
self.url = reverse('submission:download')
self.template = 'submission/download_xlsx_template.html'
self.simple_user = PixelerFactory(
is_active=True,
is_staff=False,
is_superuser=False,
)
def test_login_required(self):
# User is not logged in, she should be redirected to the login form
response = self.client.get(self.url)
expected_url = '{}?next={}'.format(reverse('login'), self.url)
self.assertRedirects(response, expected_url)
# Log an active user in and then test we are not redirected
self.assertTrue(
self.client.login(
username=self.simple_user.username,
password=PIXELER_PASSWORD,
)
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, self.template)
def test_context_data(self):
self.assertTrue(
self.client.login(
username=self.simple_user.username,
password=PIXELER_PASSWORD,
)
)
response = self.client.get(self.url)
self.assertEqual(response.context.get('step'), 'download')
self.assertEqual(response.context.get('next_step_url'), '#')
|
Add tests for the DownloadXLSXTemplateView
|
Add tests for the DownloadXLSXTemplateView
|
Python
|
bsd-3-clause
|
Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel
|
Add tests for the DownloadXLSXTemplateView
|
from django.test import TestCase
from django.urls import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
class DownloadXLSXTemplateViewTestCase(TestCase):
def setUp(self):
self.url = reverse('submission:download')
self.template = 'submission/download_xlsx_template.html'
self.simple_user = PixelerFactory(
is_active=True,
is_staff=False,
is_superuser=False,
)
def test_login_required(self):
# User is not logged in, she should be redirected to the login form
response = self.client.get(self.url)
expected_url = '{}?next={}'.format(reverse('login'), self.url)
self.assertRedirects(response, expected_url)
# Log an active user in and then test we are not redirected
self.assertTrue(
self.client.login(
username=self.simple_user.username,
password=PIXELER_PASSWORD,
)
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, self.template)
def test_context_data(self):
self.assertTrue(
self.client.login(
username=self.simple_user.username,
password=PIXELER_PASSWORD,
)
)
response = self.client.get(self.url)
self.assertEqual(response.context.get('step'), 'download')
self.assertEqual(response.context.get('next_step_url'), '#')
|
<commit_before><commit_msg>Add tests for the DownloadXLSXTemplateView<commit_after>
|
from django.test import TestCase
from django.urls import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
class DownloadXLSXTemplateViewTestCase(TestCase):
def setUp(self):
self.url = reverse('submission:download')
self.template = 'submission/download_xlsx_template.html'
self.simple_user = PixelerFactory(
is_active=True,
is_staff=False,
is_superuser=False,
)
def test_login_required(self):
# User is not logged in, she should be redirected to the login form
response = self.client.get(self.url)
expected_url = '{}?next={}'.format(reverse('login'), self.url)
self.assertRedirects(response, expected_url)
# Log an active user in and then test we are not redirected
self.assertTrue(
self.client.login(
username=self.simple_user.username,
password=PIXELER_PASSWORD,
)
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, self.template)
def test_context_data(self):
self.assertTrue(
self.client.login(
username=self.simple_user.username,
password=PIXELER_PASSWORD,
)
)
response = self.client.get(self.url)
self.assertEqual(response.context.get('step'), 'download')
self.assertEqual(response.context.get('next_step_url'), '#')
|
Add tests for the DownloadXLSXTemplateViewfrom django.test import TestCase
from django.urls import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
class DownloadXLSXTemplateViewTestCase(TestCase):
def setUp(self):
self.url = reverse('submission:download')
self.template = 'submission/download_xlsx_template.html'
self.simple_user = PixelerFactory(
is_active=True,
is_staff=False,
is_superuser=False,
)
def test_login_required(self):
# User is not logged in, she should be redirected to the login form
response = self.client.get(self.url)
expected_url = '{}?next={}'.format(reverse('login'), self.url)
self.assertRedirects(response, expected_url)
# Log an active user in and then test we are not redirected
self.assertTrue(
self.client.login(
username=self.simple_user.username,
password=PIXELER_PASSWORD,
)
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, self.template)
def test_context_data(self):
self.assertTrue(
self.client.login(
username=self.simple_user.username,
password=PIXELER_PASSWORD,
)
)
response = self.client.get(self.url)
self.assertEqual(response.context.get('step'), 'download')
self.assertEqual(response.context.get('next_step_url'), '#')
|
<commit_before><commit_msg>Add tests for the DownloadXLSXTemplateView<commit_after>from django.test import TestCase
from django.urls import reverse
from apps.core.factories import PIXELER_PASSWORD, PixelerFactory
class DownloadXLSXTemplateViewTestCase(TestCase):
def setUp(self):
self.url = reverse('submission:download')
self.template = 'submission/download_xlsx_template.html'
self.simple_user = PixelerFactory(
is_active=True,
is_staff=False,
is_superuser=False,
)
def test_login_required(self):
# User is not logged in, she should be redirected to the login form
response = self.client.get(self.url)
expected_url = '{}?next={}'.format(reverse('login'), self.url)
self.assertRedirects(response, expected_url)
# Log an active user in and then test we are not redirected
self.assertTrue(
self.client.login(
username=self.simple_user.username,
password=PIXELER_PASSWORD,
)
)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, self.template)
def test_context_data(self):
self.assertTrue(
self.client.login(
username=self.simple_user.username,
password=PIXELER_PASSWORD,
)
)
response = self.client.get(self.url)
self.assertEqual(response.context.get('step'), 'download')
self.assertEqual(response.context.get('next_step_url'), '#')
|
|
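A view consistent with those two tests would be a login-protected TemplateView that injects the wizard step into its context. The sketch below is one way to satisfy them, not necessarily the app's actual implementation.

from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import TemplateView

class DownloadXLSXTemplateView(LoginRequiredMixin, TemplateView):
    template_name = 'submission/download_xlsx_template.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # values asserted in test_context_data; the next step URL is still a placeholder
        context['step'] = 'download'
        context['next_step_url'] = '#'
        return context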
8be12a3a28779cf3ad91f732f5dc45bc0843fba1
|
cla_backend/apps/knowledgebase/management/commands/general_and_government_addition.py
|
cla_backend/apps/knowledgebase/management/commands/general_and_government_addition.py
|
from django.core.management.base import BaseCommand
from knowledgebase.models import ArticleCategory
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write("Getting or creating General Article Category")
ArticleCategory.objects.get_or_create(name="General")
self.stdout.write("Getting or creating Government Article Category")
ArticleCategory.objects.get_or_create(name="Government")
|
Add General and Government article categories
|
Add General and Government article categories
|
Python
|
mit
|
ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend
|
Add General and Government article categories
|
from django.core.management.base import BaseCommand
from knowledgebase.models import ArticleCategory
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write("Getting or creating General Article Category")
ArticleCategory.objects.get_or_create(name="General")
self.stdout.write("Getting or creating Government Article Category")
ArticleCategory.objects.get_or_create(name="Government")
|
<commit_before><commit_msg>Add General and Government article categories<commit_after>
|
from django.core.management.base import BaseCommand
from knowledgebase.models import ArticleCategory
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write("Getting or creating General Article Category")
ArticleCategory.objects.get_or_create(name="General")
self.stdout.write("Getting or creating Government Article Category")
ArticleCategory.objects.get_or_create(name="Government")
|
Add General and Government article categoriesfrom django.core.management.base import BaseCommand
from knowledgebase.models import ArticleCategory
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write("Getting or creating General Article Category")
ArticleCategory.objects.get_or_create(name="General")
self.stdout.write("Getting or creating Government Article Category")
ArticleCategory.objects.get_or_create(name="Government")
|
<commit_before><commit_msg>Add General and Government article categories<commit_after>from django.core.management.base import BaseCommand
from knowledgebase.models import ArticleCategory
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write("Getting or creating General Article Category")
ArticleCategory.objects.get_or_create(name="General")
self.stdout.write("Getting or creating Government Article Category")
ArticleCategory.objects.get_or_create(name="Government")
|
|
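Django derives the command name from the module name, so once the file above is in place the categories can be created from the shell with manage.py or programmatically, e.g. from a data migration or a test:

from django.core.management import call_command

# runs general_and_government_addition and echoes its progress messages
call_command('general_and_government_addition')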
ca7fc022df2ce3554a241554ae8da72016a2b015
|
src/helloWorld.py
|
src/helloWorld.py
|
import cv2
cam = cv2.VideoCapture(0)
running = True
while running:
running, frame = cam.read()
if running:
# draw rectangle
cv2.rectangle(frame,(100, 100), (200, 200), (255,0,0), 2)
# write text
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame,'Press Escape to exit...',(10,50), font, 1,(255,255,255),2)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0XFF == 27: #Escape key pressed
running = False
else:
print '[Error] reading video feed.'
cam.release()
cv2.destroyAllWindows()
|
Add HelloWorld.py to test correct setup
|
Add HelloWorld.py to test correct setup
|
Python
|
mit
|
FroeMic/CDTM-Deep-Learning-Drones,FroeMic/CDTM-Deep-Learning-Drones
|
Add HelloWorld.py to test correct setup
|
import cv2
cam = cv2.VideoCapture(0)
running = True
while running:
running, frame = cam.read()
if running:
# draw rectangle
cv2.rectangle(frame,(100, 100), (200, 200), (255,0,0), 2)
# write text
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame,'Press Escape to exit...',(10,50), font, 1,(255,255,255),2)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0XFF == 27: #Escape key pressed
running = False
else:
print '[Error] reading video feed.'
cam.release()
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add HelloWorld.py to test correct setup<commit_after>
|
import cv2
cam = cv2.VideoCapture(0)
running = True
while running:
running, frame = cam.read()
if running:
# draw rectangle
cv2.rectangle(frame,(100, 100), (200, 200), (255,0,0), 2)
# write text
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame,'Press Escape to exit...',(10,50), font, 1,(255,255,255),2)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0XFF == 27: #Escape key pressed
running = False
else:
print '[Error] reading video feed.'
cam.release()
cv2.destroyAllWindows()
|
Add HelloWorld.py to test correct setupimport cv2
cam = cv2.VideoCapture(0)
running = True
while running:
running, frame = cam.read()
if running:
# draw rectangle
cv2.rectangle(frame,(100, 100), (200, 200), (255,0,0), 2)
# write text
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame,'Press Escape to exit...',(10,50), font, 1,(255,255,255),2)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0XFF == 27: #Escape key pressed
running = False
else:
print '[Error] reading video feed.'
cam.release()
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add HelloWorld.py to test correct setup<commit_after>import cv2
cam = cv2.VideoCapture(0)
running = True
while running:
running, frame = cam.read()
if running:
# draw rectangle
cv2.rectangle(frame,(100, 100), (200, 200), (255,0,0), 2)
# write text
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(frame,'Press Escape to exit...',(10,50), font, 1,(255,255,255),2)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0XFF == 27: #Escape key pressed
running = False
else:
print '[Error] reading video feed.'
cam.release()
cv2.destroyAllWindows()
|
|
21eb7e06f175a08b4d90146d1bfb48670577e59b
|
bin/analysis/create_static_model.py
|
bin/analysis/create_static_model.py
|
# The old seed pipeline
import logging
import emission.analysis.classification.inference.mode.seed.pipeline as pipeline
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
seed_pipeline = pipeline.ModeInferencePipelineMovesFormat()
seed_pipeline.runPipeline()
|
Check in a simple script to create and save a model based on old-style data
|
Check in a simple script to create and save a model based on old-style data
Since the analysis pipeline has already been defined, this was pretty easy.
And it is even tested.
Testing done: Ran it, there was a json file created.
|
Python
|
bsd-3-clause
|
shankari/e-mission-server,e-mission/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server
|
Check in a simple script to create and save a model based on old-style data
Since the analysis pipeline has already been defined, this was pretty easy.
And it is even tested.
Testing done: Ran it, there was a json file created.
|
# The old seed pipeline
import logging
import emission.analysis.classification.inference.mode.seed.pipeline as pipeline
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
seed_pipeline = pipeline.ModeInferencePipelineMovesFormat()
seed_pipeline.runPipeline()
|
<commit_before><commit_msg>Check in a simple script to create and save a model based on old-style data
Since the analysis pipeline has already been defined, this was pretty easy.
And it is even tested.
Testing done: Ran it, there was a json file created.<commit_after>
|
# The old seed pipeline
import logging
import emission.analysis.classification.inference.mode.seed.pipeline as pipeline
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
seed_pipeline = pipeline.ModeInferencePipelineMovesFormat()
seed_pipeline.runPipeline()
|
Check in a simple script to create and save a model based on old-style data
Since the analysis pipeline has already been defined, this was pretty easy.
And it is even tested.
Testing done: Ran it, there was a json file created.# The old seed pipeline
import logging
import emission.analysis.classification.inference.mode.seed.pipeline as pipeline
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
seed_pipeline = pipeline.ModeInferencePipelineMovesFormat()
seed_pipeline.runPipeline()
|
<commit_before><commit_msg>Check in a simple script to create and save a model based on old-style data
Since the analysis pipeline has already been defined, this was pretty easy.
And it is even tested.
Testing done: Ran it, there was a json file created.<commit_after># The old seed pipeline
import logging
import emission.analysis.classification.inference.mode.seed.pipeline as pipeline
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
seed_pipeline = pipeline.ModeInferencePipelineMovesFormat()
seed_pipeline.runPipeline()
|
|
c2a8863d579830ae63131bef5fdd4320720e05c7
|
services/mixins/utils.py
|
services/mixins/utils.py
|
from services.mixins.constants import LICENSE_UNKNOWN, LICENSE_CC0, LICENSE_CC_BY, LICENSE_CC_BY_NC, \
LICENSE_CC_BY_NC_ND, LICENSE_CC_BY_NC_SA, LICENSE_CC_BY_ND, LICENSE_CC_BY_SA, LICENSE_CC_SAMPLING_PLUS
def translate_cc_license_url(url):
# TODO: this does not include license versioning (3.0, 4.0...)
if '/by/' in url: return LICENSE_CC_BY
if '/by-nc/' in url: return LICENSE_CC_BY_NC
if '/by-nd/' in url: return LICENSE_CC_BY_ND
if '/by-sa/' in url: return LICENSE_CC_BY_SA
if '/by-nc-sa/' in url: return LICENSE_CC_BY_NC_SA
if '/by-nc-nd/' in url: return LICENSE_CC_BY_NC_ND
if '/zero/' in url: return LICENSE_CC0
if '/publicdomain/' in url: return LICENSE_CC0
if '/sampling+/' in url: return LICENSE_CC_SAMPLING_PLUS
return LICENSE_UNKNOWN
|
Add util function to translate from cc license urls
|
Add util function to translate from cc license urls
|
Python
|
apache-2.0
|
AudioCommons/ac-mediator,AudioCommons/ac-mediator,AudioCommons/ac-mediator
|
Add util function to translate from cc license urls
|
from services.mixins.constants import LICENSE_UNKNOWN, LICENSE_CC0, LICENSE_CC_BY, LICENSE_CC_BY_NC, \
LICENSE_CC_BY_NC_ND, LICENSE_CC_BY_NC_SA, LICENSE_CC_BY_ND, LICENSE_CC_BY_SA, LICENSE_CC_SAMPLING_PLUS
def translate_cc_license_url(url):
# TODO: this does not include license versioning (3.0, 4.0...)
if '/by/' in url: return LICENSE_CC_BY
if '/by-nc/' in url: return LICENSE_CC_BY_NC
if '/by-nd/' in url: return LICENSE_CC_BY_ND
if '/by-sa/' in url: return LICENSE_CC_BY_SA
if '/by-nc-sa/' in url: return LICENSE_CC_BY_NC_SA
if '/by-nc-nd/' in url: return LICENSE_CC_BY_NC_ND
if '/zero/' in url: return LICENSE_CC0
if '/publicdomain/' in url: return LICENSE_CC0
if '/sampling+/' in url: return LICENSE_CC_SAMPLING_PLUS
return LICENSE_UNKNOWN
|
<commit_before><commit_msg>Add util function to translate from cc license urls<commit_after>
|
from services.mixins.constants import LICENSE_UNKNOWN, LICENSE_CC0, LICENSE_CC_BY, LICENSE_CC_BY_NC, \
LICENSE_CC_BY_NC_ND, LICENSE_CC_BY_NC_SA, LICENSE_CC_BY_ND, LICENSE_CC_BY_SA, LICENSE_CC_SAMPLING_PLUS
def translate_cc_license_url(url):
# TODO: this does not include license versioning (3.0, 4.0...)
if '/by/' in url: return LICENSE_CC_BY
if '/by-nc/' in url: return LICENSE_CC_BY_NC
if '/by-nd/' in url: return LICENSE_CC_BY_ND
if '/by-sa/' in url: return LICENSE_CC_BY_SA
if '/by-nc-sa/' in url: return LICENSE_CC_BY_NC_SA
if '/by-nc-nd/' in url: return LICENSE_CC_BY_NC_ND
if '/zero/' in url: return LICENSE_CC0
if '/publicdomain/' in url: return LICENSE_CC0
if '/sampling+/' in url: return LICENSE_CC_SAMPLING_PLUS
return LICENSE_UNKNOWN
|
Add util function to translate from cc license urlsfrom services.mixins.constants import LICENSE_UNKNOWN, LICENSE_CC0, LICENSE_CC_BY, LICENSE_CC_BY_NC, \
LICENSE_CC_BY_NC_ND, LICENSE_CC_BY_NC_SA, LICENSE_CC_BY_ND, LICENSE_CC_BY_SA, LICENSE_CC_SAMPLING_PLUS
def translate_cc_license_url(url):
# TODO: this does not include license versioning (3.0, 4.0...)
if '/by/' in url: return LICENSE_CC_BY
if '/by-nc/' in url: return LICENSE_CC_BY_NC
if '/by-nd/' in url: return LICENSE_CC_BY_ND
if '/by-sa/' in url: return LICENSE_CC_BY_SA
if '/by-nc-sa/' in url: return LICENSE_CC_BY_NC_SA
if '/by-nc-nd/' in url: return LICENSE_CC_BY_NC_ND
if '/zero/' in url: return LICENSE_CC0
if '/publicdomain/' in url: return LICENSE_CC0
if '/sampling+/' in url: return LICENSE_CC_SAMPLING_PLUS
return LICENSE_UNKNOWN
|
<commit_before><commit_msg>Add util function to translate from cc license urls<commit_after>from services.mixins.constants import LICENSE_UNKNOWN, LICENSE_CC0, LICENSE_CC_BY, LICENSE_CC_BY_NC, \
LICENSE_CC_BY_NC_ND, LICENSE_CC_BY_NC_SA, LICENSE_CC_BY_ND, LICENSE_CC_BY_SA, LICENSE_CC_SAMPLING_PLUS
def translate_cc_license_url(url):
# TODO: this does not include license versioning (3.0, 4.0...)
if '/by/' in url: return LICENSE_CC_BY
if '/by-nc/' in url: return LICENSE_CC_BY_NC
if '/by-nd/' in url: return LICENSE_CC_BY_ND
if '/by-sa/' in url: return LICENSE_CC_BY_SA
if '/by-nc-sa/' in url: return LICENSE_CC_BY_NC_SA
if '/by-nc-nd/' in url: return LICENSE_CC_BY_NC_ND
if '/zero/' in url: return LICENSE_CC0
if '/publicdomain/' in url: return LICENSE_CC0
if '/sampling+/' in url: return LICENSE_CC_SAMPLING_PLUS
return LICENSE_UNKNOWN
|
|
9ce809a4067d311c23dd1fa4f39b5808c885afdc
|
scripts/analytics/run_keen_snapshots.py
|
scripts/analytics/run_keen_snapshots.py
|
import logging
from datetime import datetime
from keen.client import KeenClient
from website.app import init_app
from website.settings import KEEN as keen_settings
from scripts.analytics.addon_snapshot import get_events as addon_events
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def gather_snapshot_events():
today = datetime.utcnow().date()
logger.info('<---- Gathering snapshot data for right now: {} ---->'.format(today.isoformat()))
keen_events = {}
keen_events.update({'addon_analytics': addon_events()})
return keen_events
def main():
""" Gathers a snapshot of analytics at the time the script was run,
and only for that time. Cannot be back-dated.
"""
keen_project = keen_settings['private']['project_id']
write_key = keen_settings['private']['write_key']
if keen_project and write_key:
client = KeenClient(
project_id=keen_project,
write_key=write_key,
)
assert(client)
keen_events = gather_snapshot_events()
client.add_events(keen_events)
if __name__ == '__main__':
init_app()
main()
|
Add other base script that will run addon and other snapshot scripts
|
Add other base script that will run addon and other snapshot scripts
|
Python
|
apache-2.0
|
chennan47/osf.io,acshi/osf.io,Nesiehr/osf.io,Johnetordoff/osf.io,felliott/osf.io,mattclark/osf.io,hmoco/osf.io,pattisdr/osf.io,CenterForOpenScience/osf.io,monikagrabowska/osf.io,acshi/osf.io,alexschiller/osf.io,mfraezz/osf.io,mluo613/osf.io,mfraezz/osf.io,erinspace/osf.io,mattclark/osf.io,chrisseto/osf.io,hmoco/osf.io,chennan47/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,adlius/osf.io,mluo613/osf.io,binoculars/osf.io,mattclark/osf.io,chennan47/osf.io,sloria/osf.io,acshi/osf.io,alexschiller/osf.io,sloria/osf.io,caseyrollins/osf.io,adlius/osf.io,TomBaxter/osf.io,alexschiller/osf.io,cwisecarver/osf.io,caneruguz/osf.io,caneruguz/osf.io,acshi/osf.io,aaxelb/osf.io,leb2dg/osf.io,HalcyonChimera/osf.io,sloria/osf.io,Nesiehr/osf.io,cwisecarver/osf.io,Nesiehr/osf.io,cslzchen/osf.io,HalcyonChimera/osf.io,binoculars/osf.io,saradbowman/osf.io,aaxelb/osf.io,leb2dg/osf.io,icereval/osf.io,leb2dg/osf.io,cwisecarver/osf.io,felliott/osf.io,alexschiller/osf.io,caseyrollins/osf.io,rdhyee/osf.io,icereval/osf.io,monikagrabowska/osf.io,rdhyee/osf.io,crcresearch/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,crcresearch/osf.io,baylee-d/osf.io,saradbowman/osf.io,baylee-d/osf.io,chrisseto/osf.io,Nesiehr/osf.io,rdhyee/osf.io,felliott/osf.io,monikagrabowska/osf.io,TomBaxter/osf.io,Johnetordoff/osf.io,mluo613/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,baylee-d/osf.io,hmoco/osf.io,laurenrevere/osf.io,brianjgeiger/osf.io,mluo613/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,HalcyonChimera/osf.io,monikagrabowska/osf.io,TomBaxter/osf.io,monikagrabowska/osf.io,mfraezz/osf.io,crcresearch/osf.io,caneruguz/osf.io,erinspace/osf.io,cwisecarver/osf.io,aaxelb/osf.io,alexschiller/osf.io,erinspace/osf.io,mfraezz/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,caneruguz/osf.io,pattisdr/osf.io,acshi/osf.io,CenterForOpenScience/osf.io,binoculars/osf.io,laurenrevere/osf.io,laurenrevere/osf.io,felliott/osf.io,adlius/osf.io,chrisseto/osf.io,rdhyee/osf.io,adlius/osf.io,hmoco/osf.io,icereval/osf.io,aaxelb/osf.io,caseyrollins/osf.io,pattisdr/osf.io,chrisseto/osf.io,leb2dg/osf.io,mluo613/osf.io
|
Add other base script that will run addon and other snapshot scripts
|
import logging
from datetime import datetime
from keen.client import KeenClient
from website.app import init_app
from website.settings import KEEN as keen_settings
from scripts.analytics.addon_snapshot import get_events as addon_events
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def gather_snapshot_events():
today = datetime.utcnow().date()
logger.info('<---- Gathering snapshot data for right now: {} ---->'.format(today.isoformat()))
keen_events = {}
keen_events.update({'addon_analytics': addon_events()})
return keen_events
def main():
""" Gathers a snapshot of analytics at the time the script was run,
and only for that time. Cannot be back-dated.
"""
keen_project = keen_settings['private']['project_id']
write_key = keen_settings['private']['write_key']
if keen_project and write_key:
client = KeenClient(
project_id=keen_project,
write_key=write_key,
)
assert(client)
keen_events = gather_snapshot_events()
client.add_events(keen_events)
if __name__ == '__main__':
init_app()
main()
|
<commit_before><commit_msg>Add other base script that will run addon and other snapshot scripts<commit_after>
|
import logging
from datetime import datetime
from keen.client import KeenClient
from website.app import init_app
from website.settings import KEEN as keen_settings
from scripts.analytics.addon_snapshot import get_events as addon_events
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def gather_snapshot_events():
today = datetime.utcnow().date()
logger.info('<---- Gathering snapshot data for right now: {} ---->'.format(today.isoformat()))
keen_events = {}
keen_events.update({'addon_analytics': addon_events()})
return keen_events
def main():
""" Gathers a snapshot of analytics at the time the script was run,
and only for that time. Cannot be back-dated.
"""
keen_project = keen_settings['private']['project_id']
write_key = keen_settings['private']['write_key']
if keen_project and write_key:
client = KeenClient(
project_id=keen_project,
write_key=write_key,
)
assert(client)
keen_events = gather_snapshot_events()
client.add_events(keen_events)
if __name__ == '__main__':
init_app()
main()
|
Add other base script that will run addon and other snapshot scriptsimport logging
from datetime import datetime
from keen.client import KeenClient
from website.app import init_app
from website.settings import KEEN as keen_settings
from scripts.analytics.addon_snapshot import get_events as addon_events
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def gather_snapshot_events():
today = datetime.utcnow().date()
logger.info('<---- Gathering snapshot data for right now: {} ---->'.format(today.isoformat()))
keen_events = {}
keen_events.update({'addon_analytics': addon_events()})
return keen_events
def main():
""" Gathers a snapshot of analytics at the time the script was run,
and only for that time. Cannot be back-dated.
"""
keen_project = keen_settings['private']['project_id']
write_key = keen_settings['private']['write_key']
if keen_project and write_key:
client = KeenClient(
project_id=keen_project,
write_key=write_key,
)
assert(client)
keen_events = gather_snapshot_events()
client.add_events(keen_events)
if __name__ == '__main__':
init_app()
main()
|
<commit_before><commit_msg>Add other base script that will run addon and other snapshot scripts<commit_after>import logging
from datetime import datetime
from keen.client import KeenClient
from website.app import init_app
from website.settings import KEEN as keen_settings
from scripts.analytics.addon_snapshot import get_events as addon_events
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def gather_snapshot_events():
today = datetime.utcnow().date()
logger.info('<---- Gathering snapshot data for right now: {} ---->'.format(today.isoformat()))
keen_events = {}
keen_events.update({'addon_analytics': addon_events()})
return keen_events
def main():
""" Gathers a snapshot of analytics at the time the script was run,
and only for that time. Cannot be back-dated.
"""
keen_project = keen_settings['private']['project_id']
write_key = keen_settings['private']['write_key']
if keen_project and write_key:
client = KeenClient(
project_id=keen_project,
write_key=write_key,
)
assert(client)
keen_events = gather_snapshot_events()
client.add_events(keen_events)
if __name__ == '__main__':
init_app()
main()
|
|
c2ded8bea952162ba38e9378c5d3ddaace9bea20
|
tests/test_toggle_livesync_command.py
|
tests/test_toggle_livesync_command.py
|
import unittest
from unittest import TestCase
from unittest.mock import MagicMock, patch
from sys import modules
class TestLiveSync(TestCase):
@patch('nativescript-plugin.toggle_livesync_command.ToggleLiveSyncNsCommand.on_finished', side_effect=lambda succeded: None)
def test_toggle_livesync_command_when_project_is_none_should_return_none(self, on_finished):
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
self.assertIsNone(toggle_livesync_command.execute(None, {}))
@patch('nativescript-plugin.toggle_livesync_command.ToggleLiveSyncNsCommand.on_finished', side_effect=lambda succeded: None)
def test_toggle_livesync_command_when_device_is_none_should_return_none(self, on_finished):
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
self.assertIsNone(toggle_livesync_command.execute("some_project_path", None))
def test_toggle_livesync_command_should_pass_correct_parameters(self):
device_mock = {"platform": "Android", "identifier": "192.168.56.101:5555"}
project_path = "some_project_path"
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
toggle_livesync_command.run_command = MagicMock()
toggle_livesync_command.execute(project_path, device_mock)
toggle_livesync_command.run_command.assert_called_with(["livesync",
device_mock["platform"],
"--watch",
"--path", project_path,
"--device",
device_mock["identifier"]],
False)
def test_toggle_livesync_command_when_called_for_second_time_should_terminate_process(self):
device_mock = {"platform": "Android", "identifier": "192.168.56.101:5555"}
project_path = "some_project_path"
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
terminate_called = False
class MockThread():
def terminate(self):
nonlocal terminate_called
terminate_called = True
mock_thread = MockThread()
toggle_livesync_command.run_command = MagicMock(return_value=MockThread())
toggle_livesync_command.execute(project_path, device_mock)
toggle_livesync_command.run_command.assert_called_with(["livesync",
device_mock["platform"],
"--watch",
"--path", project_path,
"--device",
device_mock["identifier"]],
False)
toggle_livesync_command.execute(project_path, device_mock)
self.assertTrue(terminate_called)
if __name__ == '__main__':
unittest.main()
|
Add tests for Toggle LiveSync command
|
Add tests for Toggle LiveSync command
|
Python
|
apache-2.0
|
Mitko-Kerezov/ns-sublime-plugin,Mitko-Kerezov/ns-sublime-plugin
|
Add tests for Toggle LiveSync command
|
import unittest
from unittest import TestCase
from unittest.mock import MagicMock, patch
from sys import modules
class TestLiveSync(TestCase):
@patch('nativescript-plugin.toggle_livesync_command.ToggleLiveSyncNsCommand.on_finished', side_effect=lambda succeded: None)
def test_toggle_livesync_command_when_project_is_none_should_return_none(self, on_finished):
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
self.assertIsNone(toggle_livesync_command.execute(None, {}))
@patch('nativescript-plugin.toggle_livesync_command.ToggleLiveSyncNsCommand.on_finished', side_effect=lambda succeded: None)
def test_toggle_livesync_command_when_device_is_none_should_return_none(self, on_finished):
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
self.assertIsNone(toggle_livesync_command.execute("some_project_path", None))
def test_toggle_livesync_command_should_pass_correct_parameters(self):
device_mock = {"platform": "Android", "identifier": "192.168.56.101:5555"}
project_path = "some_project_path"
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
toggle_livesync_command.run_command = MagicMock()
toggle_livesync_command.execute(project_path, device_mock)
toggle_livesync_command.run_command.assert_called_with(["livesync",
device_mock["platform"],
"--watch",
"--path", project_path,
"--device",
device_mock["identifier"]],
False)
def test_toggle_livesync_command_when_called_for_second_time_should_terminate_process(self):
device_mock = {"platform": "Android", "identifier": "192.168.56.101:5555"}
project_path = "some_project_path"
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
terminate_called = False
class MockThread():
def terminate(self):
nonlocal terminate_called
terminate_called = True
mock_thread = MockThread()
toggle_livesync_command.run_command = MagicMock(return_value=MockThread())
toggle_livesync_command.execute(project_path, device_mock)
toggle_livesync_command.run_command.assert_called_with(["livesync",
device_mock["platform"],
"--watch",
"--path", project_path,
"--device",
device_mock["identifier"]],
False)
toggle_livesync_command.execute(project_path, device_mock)
self.assertTrue(terminate_called)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for Toggle LiveSync command<commit_after>
|
import unittest
from unittest import TestCase
from unittest.mock import MagicMock, patch
from sys import modules
class TestLiveSync(TestCase):
@patch('nativescript-plugin.toggle_livesync_command.ToggleLiveSyncNsCommand.on_finished', side_effect=lambda succeded: None)
def test_toggle_livesync_command_when_project_is_none_should_return_none(self, on_finished):
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
self.assertIsNone(toggle_livesync_command.execute(None, {}))
@patch('nativescript-plugin.toggle_livesync_command.ToggleLiveSyncNsCommand.on_finished', side_effect=lambda succeded: None)
def test_toggle_livesync_command_when_device_is_none_should_return_none(self, on_finished):
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
self.assertIsNone(toggle_livesync_command.execute("some_project_path", None))
def test_toggle_livesync_command_should_pass_correct_parameters(self):
device_mock = {"platform": "Android", "identifier": "192.168.56.101:5555"}
project_path = "some_project_path"
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
toggle_livesync_command.run_command = MagicMock()
toggle_livesync_command.execute(project_path, device_mock)
toggle_livesync_command.run_command.assert_called_with(["livesync",
device_mock["platform"],
"--watch",
"--path", project_path,
"--device",
device_mock["identifier"]],
False)
def test_toggle_livesync_command_when_called_for_second_time_should_terminate_process(self):
device_mock = {"platform": "Android", "identifier": "192.168.56.101:5555"}
project_path = "some_project_path"
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
terminate_called = False
class MockThread():
def terminate(self):
nonlocal terminate_called
terminate_called = True
mock_thread = MockThread()
toggle_livesync_command.run_command = MagicMock(return_value=MockThread())
toggle_livesync_command.execute(project_path, device_mock)
toggle_livesync_command.run_command.assert_called_with(["livesync",
device_mock["platform"],
"--watch",
"--path", project_path,
"--device",
device_mock["identifier"]],
False)
toggle_livesync_command.execute(project_path, device_mock)
self.assertTrue(terminate_called)
if __name__ == '__main__':
unittest.main()
|
Add tests for Toggle LiveSync commandimport unittest
from unittest import TestCase
from unittest.mock import MagicMock, patch
from sys import modules
class TestLiveSync(TestCase):
@patch('nativescript-plugin.toggle_livesync_command.ToggleLiveSyncNsCommand.on_finished', side_effect=lambda succeded: None)
def test_toggle_livesync_command_when_project_is_none_should_return_none(self, on_finished):
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
self.assertIsNone(toggle_livesync_command.execute(None, {}))
@patch('nativescript-plugin.toggle_livesync_command.ToggleLiveSyncNsCommand.on_finished', side_effect=lambda succeded: None)
def test_toggle_livesync_command_when_device_is_none_should_return_none(self, on_finished):
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
self.assertIsNone(toggle_livesync_command.execute("some_project_path", None))
def test_toggle_livesync_command_should_pass_correct_parameters(self):
device_mock = {"platform": "Android", "identifier": "192.168.56.101:5555"}
project_path = "some_project_path"
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
toggle_livesync_command.run_command = MagicMock()
toggle_livesync_command.execute(project_path, device_mock)
toggle_livesync_command.run_command.assert_called_with(["livesync",
device_mock["platform"],
"--watch",
"--path", project_path,
"--device",
device_mock["identifier"]],
False)
def test_toggle_livesync_command_when_called_for_second_time_should_terminate_process(self):
device_mock = {"platform": "Android", "identifier": "192.168.56.101:5555"}
project_path = "some_project_path"
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
terminate_called = False
class MockThread():
def terminate(self):
nonlocal terminate_called
terminate_called = True
mock_thread = MockThread()
toggle_livesync_command.run_command = MagicMock(return_value=MockThread())
toggle_livesync_command.execute(project_path, device_mock)
toggle_livesync_command.run_command.assert_called_with(["livesync",
device_mock["platform"],
"--watch",
"--path", project_path,
"--device",
device_mock["identifier"]],
False)
toggle_livesync_command.execute(project_path, device_mock)
self.assertTrue(terminate_called)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for Toggle LiveSync command<commit_after>import unittest
from unittest import TestCase
from unittest.mock import MagicMock, patch
from sys import modules
class TestLiveSync(TestCase):
@patch('nativescript-plugin.toggle_livesync_command.ToggleLiveSyncNsCommand.on_finished', side_effect=lambda succeded: None)
def test_toggle_livesync_command_when_project_is_none_should_return_none(self, on_finished):
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
self.assertIsNone(toggle_livesync_command.execute(None, {}))
@patch('nativescript-plugin.toggle_livesync_command.ToggleLiveSyncNsCommand.on_finished', side_effect=lambda succeded: None)
def test_toggle_livesync_command_when_device_is_none_should_return_none(self, on_finished):
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
self.assertIsNone(toggle_livesync_command.execute("some_project_path", None))
def test_toggle_livesync_command_should_pass_correct_parameters(self):
device_mock = {"platform": "Android", "identifier": "192.168.56.101:5555"}
project_path = "some_project_path"
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
toggle_livesync_command.run_command = MagicMock()
toggle_livesync_command.execute(project_path, device_mock)
toggle_livesync_command.run_command.assert_called_with(["livesync",
device_mock["platform"],
"--watch",
"--path", project_path,
"--device",
device_mock["identifier"]],
False)
def test_toggle_livesync_command_when_called_for_second_time_should_terminate_process(self):
device_mock = {"platform": "Android", "identifier": "192.168.56.101:5555"}
project_path = "some_project_path"
toggle_livesync_command = modules["nativescript-plugin.toggle_livesync_command"].ToggleLiveSyncNsCommand()
terminate_called = False
class MockThread():
def terminate(self):
nonlocal terminate_called
terminate_called = True
mock_thread = MockThread()
toggle_livesync_command.run_command = MagicMock(return_value=MockThread())
toggle_livesync_command.execute(project_path, device_mock)
toggle_livesync_command.run_command.assert_called_with(["livesync",
device_mock["platform"],
"--watch",
"--path", project_path,
"--device",
device_mock["identifier"]],
False)
toggle_livesync_command.execute(project_path, device_mock)
self.assertTrue(terminate_called)
if __name__ == '__main__':
unittest.main()
|
|
f1fc7e4f2200863535ffbc3cb47b1b0f7375b943
|
analysis/check_serum_strain_serum_id_match.py
|
analysis/check_serum_strain_serum_id_match.py
|
# Open downloaded titer table and check matches between serum_strain and serum_id
# There should only be a single serum_strain for each serum_id
# There can be multiple serum_ids for each serum_strain
import argparse
import csv
parser = argparse.ArgumentParser()
parser.add_argument('infile', default=None, type=str, help="file to test")
if __name__=="__main__":
args = parser.parse_args()
id_to_strain_mapping = {}
if args.infile:
with open(args.infile) as fd:
rd = csv.reader(fd, delimiter="\t", quotechar='"')
for row in rd:
serum_strain = row[1] # second column is serum_strain
serum_id = row[2] # third column is serum_id
if serum_id in id_to_strain_mapping:
id_to_strain_mapping[serum_id].add(serum_strain)
else:
id_to_strain_mapping[serum_id] = set([serum_strain])
print("ALL SERUM_IDS")
print(id_to_strain_mapping)
print()
print("PROBLEMATIC SERUM_IDS")
for serum_id, serum_strains in id_to_strain_mapping.items():
if len(serum_strains)>1:
print("serum_id", serum_id)
print("serum_strains", serum_strains)
|
Include analysis script to test for broken serum_id / serum_strain matches
|
Include analysis script to test for broken serum_id / serum_strain matches
|
Python
|
agpl-3.0
|
nextstrain/fauna,blab/nextstrain-db,blab/nextstrain-db,nextstrain/fauna
|
Include analysis script to test for broken serum_id / serum_strain matches
|
# Open downloaded titer table and check matches between serum_strain and serum_id
# There should only be a single serum_strain for each serum_id
# There can be multiple serum_ids for each serum_strain
import argparse
import csv
parser = argparse.ArgumentParser()
parser.add_argument('infile', default=None, type=str, help="file to test")
if __name__=="__main__":
args = parser.parse_args()
id_to_strain_mapping = {}
if args.infile:
with open(args.infile) as fd:
rd = csv.reader(fd, delimiter="\t", quotechar='"')
for row in rd:
serum_strain = row[1] # second column is serum_strain
serum_id = row[2] # third column is serum_id
if serum_id in id_to_strain_mapping:
id_to_strain_mapping[serum_id].add(serum_strain)
else:
id_to_strain_mapping[serum_id] = set([serum_strain])
print("ALL SERUM_IDS")
print(id_to_strain_mapping)
print()
print("PROBLEMATIC SERUM_IDS")
for serum_id, serum_strains in id_to_strain_mapping.items():
if len(serum_strains)>1:
print("serum_id", serum_id)
print("serum_strains", serum_strains)
|
<commit_before><commit_msg>Include analysis script to test for broken serum_id / serum_strain matches<commit_after>
|
# Open downloaded titer table and check matches between serum_strain and serum_id
# There should only be a single serum_strain for each serum_id
# There can be multiple serum_ids for each serum_strain
import argparse
import csv
parser = argparse.ArgumentParser()
parser.add_argument('infile', default=None, type=str, help="file to test")
if __name__=="__main__":
args = parser.parse_args()
id_to_strain_mapping = {}
if args.infile:
with open(args.infile) as fd:
rd = csv.reader(fd, delimiter="\t", quotechar='"')
for row in rd:
serum_strain = row[1] # second column is serum_strain
serum_id = row[2] # third column is serum_id
if serum_id in id_to_strain_mapping:
id_to_strain_mapping[serum_id].add(serum_strain)
else:
id_to_strain_mapping[serum_id] = set([serum_strain])
print("ALL SERUM_IDS")
print(id_to_strain_mapping)
print()
print("PROBLEMATIC SERUM_IDS")
for serum_id, serum_strains in id_to_strain_mapping.items():
if len(serum_strains)>1:
print("serum_id", serum_id)
print("serum_strains", serum_strains)
|
Include analysis script to test for broken serum_id / serum_strain matches# Open downloaded titer table and check matches between serum_strain and serum_id
# There should only be a single serum_strain for each serum_id
# There can be multiple serum_ids for each serum_strain
import argparse
import csv
parser = argparse.ArgumentParser()
parser.add_argument('infile', default=None, type=str, help="file to test")
if __name__=="__main__":
args = parser.parse_args()
id_to_strain_mapping = {}
if args.infile:
with open(args.infile) as fd:
rd = csv.reader(fd, delimiter="\t", quotechar='"')
for row in rd:
serum_strain = row[1] # second column is serum_strain
serum_id = row[2] # third column is serum_id
if serum_id in id_to_strain_mapping:
id_to_strain_mapping[serum_id].add(serum_strain)
else:
id_to_strain_mapping[serum_id] = set([serum_strain])
print("ALL SERUM_IDS")
print(id_to_strain_mapping)
print()
print("PROBLEMATIC SERUM_IDS")
for serum_id, serum_strains in id_to_strain_mapping.items():
if len(serum_strains)>1:
print("serum_id", serum_id)
print("serum_strains", serum_strains)
|
<commit_before><commit_msg>Include analysis script to test for broken serum_id / serum_strain matches<commit_after># Open downloaded titer table and check matches between serum_strain and serum_id
# There should only be a single serum_strain for each serum_id
# There can be multiple serum_ids for each serum_strain
import argparse
import csv
parser = argparse.ArgumentParser()
parser.add_argument('infile', default=None, type=str, help="file to test")
if __name__=="__main__":
args = parser.parse_args()
id_to_strain_mapping = {}
if args.infile:
with open(args.infile) as fd:
rd = csv.reader(fd, delimiter="\t", quotechar='"')
for row in rd:
serum_strain = row[1] # second column is serum_strain
serum_id = row[2] # third column is serum_id
if serum_id in id_to_strain_mapping:
id_to_strain_mapping[serum_id].add(serum_strain)
else:
id_to_strain_mapping[serum_id] = set([serum_strain])
print("ALL SERUM_IDS")
print(id_to_strain_mapping)
print()
print("PROBLEMATIC SERUM_IDS")
for serum_id, serum_strains in id_to_strain_mapping.items():
if len(serum_strains)>1:
print("serum_id", serum_id)
print("serum_strains", serum_strains)
|
|
3d7707d20c299358476cca01babf14c7cacddb50
|
smaug/tests/fullstack/test_providers.py
|
smaug/tests/fullstack/test_providers.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from smaug.tests.fullstack import smaug_base
class ProvidersTest(smaug_base.SmaugBaseTest):
"""Test Providers operation"""
provider_id = u"cf56bd3e-97a7-4078-b6d5-f36246333fd9"
def test_providers_list(self):
provider_res = self.smaug_client.providers.list()
self.assertEqual(1, len(provider_res))
def test_provider_get(self):
provider_res = self.smaug_client.providers.get(self.provider_id)
self.assertEqual("OS Infra Provider", provider_res.name)
|
Add fullstack tests of the resource providers
|
Add fullstack tests of the resource providers
Change-Id: Ie4f769de3060fdb279320637ba965d5b452e2a2d
Closes-Bug: #1578889
|
Python
|
apache-2.0
|
openstack/smaug,openstack/smaug
|
Add fullstack tests of the resource providers
Change-Id: Ie4f769de3060fdb279320637ba965d5b452e2a2d
Closes-Bug: #1578889
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from smaug.tests.fullstack import smaug_base
class ProvidersTest(smaug_base.SmaugBaseTest):
"""Test Providers operation"""
provider_id = u"cf56bd3e-97a7-4078-b6d5-f36246333fd9"
def test_providers_list(self):
provider_res = self.smaug_client.providers.list()
self.assertEqual(1, len(provider_res))
def test_provider_get(self):
provider_res = self.smaug_client.providers.get(self.provider_id)
self.assertEqual("OS Infra Provider", provider_res.name)
|
<commit_before><commit_msg>Add fullstack tests of the resource providers
Change-Id: Ie4f769de3060fdb279320637ba965d5b452e2a2d
Closes-Bug: #1578889<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from smaug.tests.fullstack import smaug_base
class ProvidersTest(smaug_base.SmaugBaseTest):
"""Test Providers operation"""
provider_id = u"cf56bd3e-97a7-4078-b6d5-f36246333fd9"
def test_providers_list(self):
provider_res = self.smaug_client.providers.list()
self.assertEqual(1, len(provider_res))
def test_provider_get(self):
provider_res = self.smaug_client.providers.get(self.provider_id)
self.assertEqual("OS Infra Provider", provider_res.name)
|
Add fullstack tests of the resource providers
Change-Id: Ie4f769de3060fdb279320637ba965d5b452e2a2d
Closes-Bug: #1578889# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from smaug.tests.fullstack import smaug_base
class ProvidersTest(smaug_base.SmaugBaseTest):
"""Test Providers operation"""
provider_id = u"cf56bd3e-97a7-4078-b6d5-f36246333fd9"
def test_providers_list(self):
provider_res = self.smaug_client.providers.list()
self.assertEqual(1, len(provider_res))
def test_provider_get(self):
provider_res = self.smaug_client.providers.get(self.provider_id)
self.assertEqual("OS Infra Provider", provider_res.name)
|
<commit_before><commit_msg>Add fullstack tests of the resource providers
Change-Id: Ie4f769de3060fdb279320637ba965d5b452e2a2d
Closes-Bug: #1578889<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from smaug.tests.fullstack import smaug_base
class ProvidersTest(smaug_base.SmaugBaseTest):
"""Test Providers operation"""
provider_id = u"cf56bd3e-97a7-4078-b6d5-f36246333fd9"
def test_providers_list(self):
provider_res = self.smaug_client.providers.list()
self.assertEqual(1, len(provider_res))
def test_provider_get(self):
provider_res = self.smaug_client.providers.get(self.provider_id)
self.assertEqual("OS Infra Provider", provider_res.name)
|
|
522e71e1fd210994c6745aa012c6663f34d3a397
|
randlov1998/analysis.py
|
randlov1998/analysis.py
|
"""Functions for plotting results, etc.
"""
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import pylab as pl
from scipy import r_
from pybrain.tools.customxml.networkreader import NetworkReader
from pybrain.utilities import one_to_n
def plot_nfq_action_value_history(network_name_prefix, count, state=[0, 0, 0, 0, 0], n_actions=9):
"""Example::
>>> plot_nfq_action_value_history('randlov_actionvaluenetwork_',
np.arange(0, 30, 10))
This will plot the data from the files:
randlov_actionvaluenetwork_0.xml
randlov_actionvaluenetwork_10.xml
randlov_actionvaluenetwork_20.xml
randlov_actionvaluenetwork_30.xml
"""
# TODO any file naming.
n_times = len(count)
actionvalues = np.empty((n_times, n_actions))
for i in range(n_times):
fname = network_name_prefix + '%i.xml' % count[i]
actionvalues[i, :] = nfq_action_value(fname, state)
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
actions = np.arange(n_actions)
X, Y = np.meshgrid(actions, count)
#ax.plot_surface(X, Y, actionvalues)
ax.plot_wireframe(X, Y, actionvalues)
plt.show()
def plot_nfq_action_value(network_name, state=[0, 0, 0, 0, 0]):
"""Plots Q(a) for the given state. Must provide a network serialization
(.xml). Assumes there are 9 action values.
Example::
>>> plot_nfq_action_value('randlov_actionvaluenetwork.xml', [0, 0, 0, 0, 0])
"""
pl.ion()
n_actions = 9
actionvalues = nfq_action_value(network_name, state)
actions = np.arange(len(actionvalues))
bar_width = 0.35
pl.bar(actions, actionvalues, bar_width)
pl.xticks(actions + 0.5 * bar_width, actions)
pl.show()
def nfq_action_value(network_fname, state=[0, 0, 0, 0, 0]):
# TODO generalize away from 9 action values. Ask the network how many
# discrete action values there are.
n_actions = 9
network = NetworkReader.readFrom(network_fname)
actionvalues = np.empty(n_actions)
for i_action in range(n_actions):
network_input = r_[state, one_to_n(i_action, n_actions)]
actionvalues[i_action] = network.activate(network_input)
return actionvalues
|
Add file to visualize nfq network evolution.
|
Add file to visualize nfq network evolution.
|
Python
|
mit
|
chrisdembia/agent-bicycle,chrisdembia/agent-bicycle
|
Add file to visualize nfq network evolution.
|
"""Functions for plotting results, etc.
"""
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import pylab as pl
from scipy import r_
from pybrain.tools.customxml.networkreader import NetworkReader
from pybrain.utilities import one_to_n
def plot_nfq_action_value_history(network_name_prefix, count, state=[0, 0, 0, 0, 0], n_actions=9):
"""Example::
>>> plot_nfq_action_value_history('randlov_actionvaluenetwork_',
np.arange(0, 30, 10))
This will plot the data from the files:
randlov_actionvaluenetwork_0.xml
randlov_actionvaluenetwork_10.xml
randlov_actionvaluenetwork_20.xml
randlov_actionvaluenetwork_30.xml
"""
# TODO any file naming.
n_times = len(count)
actionvalues = np.empty((n_times, n_actions))
for i in range(n_times):
fname = network_name_prefix + '%i.xml' % count[i]
actionvalues[i, :] = nfq_action_value(fname, state)
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
actions = np.arange(n_actions)
X, Y = np.meshgrid(actions, count)
#ax.plot_surface(X, Y, actionvalues)
ax.plot_wireframe(X, Y, actionvalues)
plt.show()
def plot_nfq_action_value(network_name, state=[0, 0, 0, 0, 0]):
"""Plots Q(a) for the given state. Must provide a network serialization
(.xml). Assumes there are 9 action values.
Example::
>>> plot_nfq_action_value('randlov_actionvaluenetwork.xml', [0, 0, 0, 0, 0])
"""
pl.ion()
n_actions = 9
actionvalues = nfq_action_value(network_name, state)
actions = np.arange(len(actionvalues))
bar_width = 0.35
pl.bar(actions, actionvalues, bar_width)
pl.xticks(actions + 0.5 * bar_width, actions)
pl.show()
def nfq_action_value(network_fname, state=[0, 0, 0, 0, 0]):
# TODO generalize away from 9 action values. Ask the network how many
# discrete action values there are.
n_actions = 9
network = NetworkReader.readFrom(network_fname)
actionvalues = np.empty(n_actions)
for i_action in range(n_actions):
network_input = r_[state, one_to_n(i_action, n_actions)]
actionvalues[i_action] = network.activate(network_input)
return actionvalues
|
<commit_before><commit_msg>Add file to visualize nfq network evolution.<commit_after>
|
"""Functions for plotting results, etc.
"""
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import pylab as pl
from scipy import r_
from pybrain.tools.customxml.networkreader import NetworkReader
from pybrain.utilities import one_to_n
def plot_nfq_action_value_history(network_name_prefix, count, state=[0, 0, 0, 0, 0], n_actions=9):
"""Example::
>>> plot_nfq_action_value_history('randlov_actionvaluenetwork_',
np.arange(0, 30, 10))
This will plot the data from the files:
randlov_actionvaluenetwork_0.xml
randlov_actionvaluenetwork_10.xml
randlov_actionvaluenetwork_20.xml
randlov_actionvaluenetwork_30.xml
"""
# TODO any file naming.
n_times = len(count)
actionvalues = np.empty((n_times, n_actions))
for i in range(n_times):
fname = network_name_prefix + '%i.xml' % count[i]
actionvalues[i, :] = nfq_action_value(fname, state)
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
actions = np.arange(n_actions)
X, Y = np.meshgrid(actions, count)
#ax.plot_surface(X, Y, actionvalues)
ax.plot_wireframe(X, Y, actionvalues)
plt.show()
def plot_nfq_action_value(network_name, state=[0, 0, 0, 0, 0]):
"""Plots Q(a) for the given state. Must provide a network serialization
(.xml). Assumes there are 9 action values.
Example::
>>> plot_nfq_action_value('randlov_actionvaluenetwork.xml', [0, 0, 0, 0, 0])
"""
pl.ion()
n_actions = 9
actionvalues = nfq_action_value(network_name, state)
actions = np.arange(len(actionvalues))
bar_width = 0.35
pl.bar(actions, actionvalues, bar_width)
pl.xticks(actions + 0.5 * bar_width, actions)
pl.show()
def nfq_action_value(network_fname, state=[0, 0, 0, 0, 0]):
# TODO generalize away from 9 action values. Ask the network how many
# discrete action values there are.
n_actions = 9
network = NetworkReader.readFrom(network_fname)
actionvalues = np.empty(n_actions)
for i_action in range(n_actions):
network_input = r_[state, one_to_n(i_action, n_actions)]
actionvalues[i_action] = network.activate(network_input)
return actionvalues
|
Add file to visualize nfq network evolution."""Functions for plotting results, etc.
"""
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import pylab as pl
from scipy import r_
from pybrain.tools.customxml.networkreader import NetworkReader
from pybrain.utilities import one_to_n
def plot_nfq_action_value_history(network_name_prefix, count, state=[0, 0, 0, 0, 0], n_actions=9):
"""Example::
>>> plot_nfq_action_value_history('randlov_actionvaluenetwork_',
np.arange(0, 30, 10))
This will plot the data from the files:
randlov_actionvaluenetwork_0.xml
randlov_actionvaluenetwork_10.xml
randlov_actionvaluenetwork_20.xml
randlov_actionvaluenetwork_30.xml
"""
# TODO any file naming.
n_times = len(count)
actionvalues = np.empty((n_times, n_actions))
for i in range(n_times):
fname = network_name_prefix + '%i.xml' % count[i]
actionvalues[i, :] = nfq_action_value(fname, state)
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
actions = np.arange(n_actions)
X, Y = np.meshgrid(actions, count)
#ax.plot_surface(X, Y, actionvalues)
ax.plot_wireframe(X, Y, actionvalues)
plt.show()
def plot_nfq_action_value(network_name, state=[0, 0, 0, 0, 0]):
"""Plots Q(a) for the given state. Must provide a network serialization
(.xml). Assumes there are 9 action values.
Example::
>>> plot_nfq_action_value('randlov_actionvaluenetwork.xml', [0, 0, 0, 0, 0])
"""
pl.ion()
n_actions = 9
actionvalues = nfq_action_value(network_name, state)
actions = np.arange(len(actionvalues))
bar_width = 0.35
pl.bar(actions, actionvalues, bar_width)
pl.xticks(actions + 0.5 * bar_width, actions)
pl.show()
def nfq_action_value(network_fname, state=[0, 0, 0, 0, 0]):
# TODO generalize away from 9 action values. Ask the network how many
# discrete action values there are.
n_actions = 9
network = NetworkReader.readFrom(network_fname)
actionvalues = np.empty(n_actions)
for i_action in range(n_actions):
network_input = r_[state, one_to_n(i_action, n_actions)]
actionvalues[i_action] = network.activate(network_input)
return actionvalues
|
<commit_before><commit_msg>Add file to visualize nfq network evolution.<commit_after>"""Functions for plotting results, etc.
"""
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import pylab as pl
from scipy import r_
from pybrain.tools.customxml.networkreader import NetworkReader
from pybrain.utilities import one_to_n
def plot_nfq_action_value_history(network_name_prefix, count, state=[0, 0, 0, 0, 0], n_actions=9):
"""Example::
>>> plot_nfq_action_value_history('randlov_actionvaluenetwork_',
np.arange(0, 30, 10))
This will plot the data from the files:
randlov_actionvaluenetwork_0.xml
randlov_actionvaluenetwork_10.xml
randlov_actionvaluenetwork_20.xml
randlov_actionvaluenetwork_30.xml
"""
# TODO any file naming.
n_times = len(count)
actionvalues = np.empty((n_times, n_actions))
for i in range(n_times):
fname = network_name_prefix + '%i.xml' % count[i]
actionvalues[i, :] = nfq_action_value(fname, state)
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
actions = np.arange(n_actions)
X, Y = np.meshgrid(actions, count)
#ax.plot_surface(X, Y, actionvalues)
ax.plot_wireframe(X, Y, actionvalues)
plt.show()
def plot_nfq_action_value(network_name, state=[0, 0, 0, 0, 0]):
"""Plots Q(a) for the given state. Must provide a network serialization
(.xml). Assumes there are 9 action values.
Example::
>>> plot_nfq_action_value('randlov_actionvaluenetwork.xml', [0, 0, 0, 0, 0])
"""
pl.ion()
n_actions = 9
actionvalues = nfq_action_value(network_name, state)
actions = np.arange(len(actionvalues))
bar_width = 0.35
pl.bar(actions, actionvalues, bar_width)
pl.xticks(actions + 0.5 * bar_width, actions)
pl.show()
def nfq_action_value(network_fname, state=[0, 0, 0, 0, 0]):
# TODO generalize away from 9 action values. Ask the network how many
# discrete action values there are.
n_actions = 9
network = NetworkReader.readFrom(network_fname)
actionvalues = np.empty(n_actions)
for i_action in range(n_actions):
network_input = r_[state, one_to_n(i_action, n_actions)]
actionvalues[i_action] = network.activate(network_input)
return actionvalues
|
|
00fc7eff1f9c1d1ddcc61210c1a80f966e085d1f
|
course_discovery/apps/course_metadata/migrations/0191_add_microbachelors_program_type.py
|
course_discovery/apps/course_metadata/migrations/0191_add_microbachelors_program_type.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-07-26 18:14
from __future__ import unicode_literals
from django.db import migrations
SEAT_TYPES = ('audit', 'verified',)
PROGRAM_TYPES = ('MicroBachelors',)
def add_program_types(apps, schema_editor): # pylint: disable=unused-argument
SeatType = apps.get_model('course_metadata', 'SeatType')
ProgramType = apps.get_model('course_metadata', 'ProgramType')
filtered_seat_types = SeatType.objects.filter(slug__in=SEAT_TYPES)
for name in PROGRAM_TYPES:
program_type, __ = ProgramType.objects.update_or_create(name=name)
program_type.applicable_seat_types.clear()
program_type.applicable_seat_types.add(*filtered_seat_types)
program_type.save()
def drop_program_types(apps, schema_editor): # pylint: disable=unused-argument
ProgramType = apps.get_model('course_metadata', 'ProgramType')
ProgramType.objects.filter(name__in=PROGRAM_TYPES).delete()
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0190_remove_entitlement_expires'),
]
operations = [
migrations.RunPython(
code=add_program_types,
reverse_code=drop_program_types,
),
]
|
Add MicroBachelors program_type Co-authored-by: Lise <albemarle>
|
MICROB-3: Add MicroBachelors program_type
Co-authored-by: Lise <albemarle>
|
Python
|
agpl-3.0
|
edx/course-discovery,edx/course-discovery,edx/course-discovery,edx/course-discovery
|
MICROB-3: Add MicroBachelors program_type
Co-authored-by: Lise <albemarle>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-07-26 18:14
from __future__ import unicode_literals
from django.db import migrations
SEAT_TYPES = ('audit', 'verified',)
PROGRAM_TYPES = ('MicroBachelors',)
def add_program_types(apps, schema_editor): # pylint: disable=unused-argument
SeatType = apps.get_model('course_metadata', 'SeatType')
ProgramType = apps.get_model('course_metadata', 'ProgramType')
filtered_seat_types = SeatType.objects.filter(slug__in=SEAT_TYPES)
for name in PROGRAM_TYPES:
program_type, __ = ProgramType.objects.update_or_create(name=name)
program_type.applicable_seat_types.clear()
program_type.applicable_seat_types.add(*filtered_seat_types)
program_type.save()
def drop_program_types(apps, schema_editor): # pylint: disable=unused-argument
ProgramType = apps.get_model('course_metadata', 'ProgramType')
ProgramType.objects.filter(name__in=PROGRAM_TYPES).delete()
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0190_remove_entitlement_expires'),
]
operations = [
migrations.RunPython(
code=add_program_types,
reverse_code=drop_program_types,
),
]
|
<commit_before><commit_msg>MICROB-3: Add MicroBachelors program_type
Co-authored-by: Lise <albemarle><commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-07-26 18:14
from __future__ import unicode_literals
from django.db import migrations
SEAT_TYPES = ('audit', 'verified',)
PROGRAM_TYPES = ('MicroBachelors',)
def add_program_types(apps, schema_editor): # pylint: disable=unused-argument
SeatType = apps.get_model('course_metadata', 'SeatType')
ProgramType = apps.get_model('course_metadata', 'ProgramType')
filtered_seat_types = SeatType.objects.filter(slug__in=SEAT_TYPES)
for name in PROGRAM_TYPES:
program_type, __ = ProgramType.objects.update_or_create(name=name)
program_type.applicable_seat_types.clear()
program_type.applicable_seat_types.add(*filtered_seat_types)
program_type.save()
def drop_program_types(apps, schema_editor): # pylint: disable=unused-argument
ProgramType = apps.get_model('course_metadata', 'ProgramType')
ProgramType.objects.filter(name__in=PROGRAM_TYPES).delete()
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0190_remove_entitlement_expires'),
]
operations = [
migrations.RunPython(
code=add_program_types,
reverse_code=drop_program_types,
),
]
|
MICROB-3: Add MicroBachelors program_type
Co-authored-by: Lise <albemarle># -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-07-26 18:14
from __future__ import unicode_literals
from django.db import migrations
SEAT_TYPES = ('audit', 'verified',)
PROGRAM_TYPES = ('MicroBachelors',)
def add_program_types(apps, schema_editor): # pylint: disable=unused-argument
SeatType = apps.get_model('course_metadata', 'SeatType')
ProgramType = apps.get_model('course_metadata', 'ProgramType')
filtered_seat_types = SeatType.objects.filter(slug__in=SEAT_TYPES)
for name in PROGRAM_TYPES:
program_type, __ = ProgramType.objects.update_or_create(name=name)
program_type.applicable_seat_types.clear()
program_type.applicable_seat_types.add(*filtered_seat_types)
program_type.save()
def drop_program_types(apps, schema_editor): # pylint: disable=unused-argument
ProgramType = apps.get_model('course_metadata', 'ProgramType')
ProgramType.objects.filter(name__in=PROGRAM_TYPES).delete()
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0190_remove_entitlement_expires'),
]
operations = [
migrations.RunPython(
code=add_program_types,
reverse_code=drop_program_types,
),
]
|
<commit_before><commit_msg>MICROB-3: Add MicroBachelors program_type
Co-authored-by: Lise <albemarle><commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-07-26 18:14
from __future__ import unicode_literals
from django.db import migrations
SEAT_TYPES = ('audit', 'verified',)
PROGRAM_TYPES = ('MicroBachelors',)
def add_program_types(apps, schema_editor): # pylint: disable=unused-argument
SeatType = apps.get_model('course_metadata', 'SeatType')
ProgramType = apps.get_model('course_metadata', 'ProgramType')
filtered_seat_types = SeatType.objects.filter(slug__in=SEAT_TYPES)
for name in PROGRAM_TYPES:
program_type, __ = ProgramType.objects.update_or_create(name=name)
program_type.applicable_seat_types.clear()
program_type.applicable_seat_types.add(*filtered_seat_types)
program_type.save()
def drop_program_types(apps, schema_editor): # pylint: disable=unused-argument
ProgramType = apps.get_model('course_metadata', 'ProgramType')
ProgramType.objects.filter(name__in=PROGRAM_TYPES).delete()
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0190_remove_entitlement_expires'),
]
operations = [
migrations.RunPython(
code=add_program_types,
reverse_code=drop_program_types,
),
]
|
|
175114bb680fae35a1248e41e15a2e82a5cbdc3f
|
feed_sources/AlbanyNy.py
|
feed_sources/AlbanyNy.py
|
"""Fetch Capital District Transportation Authority (Albany, New York) feed."""
import logging
from FeedSource import FeedSource
URL = 'http://www.cdta.org/schedules/google_transit.zip'
LOG = logging.getLogger(__name__)
class AlbanyNy(FeedSource):
"""Fetch CDTA feed."""
def __init__(self):
super(AlbanyNy, self).__init__()
self.urls = {'albany_ny.zip': URL}
|
Add CDTA (for Albany, NY)
|
Add CDTA (for Albany, NY)
|
Python
|
mit
|
azavea/gtfs-feed-fetcher,flibbertigibbet/gtfs-feed-fetcher
|
Add CDTA (for Albany, NY)
|
"""Fetch Capital District Transportation Authority (Albany, New York) feed."""
import logging
from FeedSource import FeedSource
URL = 'http://www.cdta.org/schedules/google_transit.zip'
LOG = logging.getLogger(__name__)
class AlbanyNy(FeedSource):
"""Fetch CDTA feed."""
def __init__(self):
super(AlbanyNy, self).__init__()
self.urls = {'albany_ny.zip': URL}
|
<commit_before><commit_msg>Add CDTA (for Albany, NY)<commit_after>
|
"""Fetch Capital District Transportation Authority (Albany, New York) feed."""
import logging
from FeedSource import FeedSource
URL = 'http://www.cdta.org/schedules/google_transit.zip'
LOG = logging.getLogger(__name__)
class AlbanyNy(FeedSource):
"""Fetch CDTA feed."""
def __init__(self):
super(AlbanyNy, self).__init__()
self.urls = {'albany_ny.zip': URL}
|
Add CDTA (for Albany, NY)"""Fetch Capital District Transportation Authority (Albany, New York) feed."""
import logging
from FeedSource import FeedSource
URL = 'http://www.cdta.org/schedules/google_transit.zip'
LOG = logging.getLogger(__name__)
class AlbanyNy(FeedSource):
"""Fetch CDTA feed."""
def __init__(self):
super(AlbanyNy, self).__init__()
self.urls = {'albany_ny.zip': URL}
|
<commit_before><commit_msg>Add CDTA (for Albany, NY)<commit_after>"""Fetch Capital District Transportation Authority (Albany, New York) feed."""
import logging
from FeedSource import FeedSource
URL = 'http://www.cdta.org/schedules/google_transit.zip'
LOG = logging.getLogger(__name__)
class AlbanyNy(FeedSource):
"""Fetch CDTA feed."""
def __init__(self):
super(AlbanyNy, self).__init__()
self.urls = {'albany_ny.zip': URL}
|
|
3ad4d95f0a886e5722717bd6b9b91a09dfcb9194
|
src/dbus_python_client_gen/_gmoreaders.py
|
src/dbus_python_client_gen/_gmoreaders.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Code for generating classes suitable for invoking dbus-python methods.
"""
from ._errors import DPClientGenerationError
from ._errors import DPClientRuntimeError
def gmo_reader_builder(spec):
"""
Returns a function that builds a method interface based on 'spec'.
This method interface is a simple one to return the values of
properties from a table generated by a GetManagedObjects() method call
for the object that implements the given interface.
:param spec: the interface specification
:type spec: Element
"""
interface_name = spec.attrib.get('name')
if interface_name is None: # pragma: no cover
raise DPClientGenerationError("No name found for interface.")
def builder(namespace):
"""
The property class's namespace.
:param namespace: the class's namespace
"""
def build_property(name):
"""
Build a single property getter for this class.
:param str name: the property name
:returns: the value of the property
:rtype: object
"""
def dbus_func(self): # pragma: no cover
"""
The property getter.
"""
# pylint: disable=protected-access
try:
return self._table[interface_name][name]
except KeyError:
raise DPClientRuntimeError(
"No entry found for interface %s and property %s" %
(interface_name, name)
)
return dbus_func
for prop in spec.findall('./property'):
name = prop.attrib.get('name')
namespace[name] = build_property(name)
def __init__(self, table):
"""
            The initializer for this class.
"""
self._table = table # pylint: disable=protected-access
namespace['__init__'] = __init__
return builder
|
Add a method for generating GetManagedObjects() data wrappers
|
Add a method for generating GetManagedObjects() data wrappers
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
|
Python
|
mpl-2.0
|
mulkieran/dbus-python-client-gen
|
Add a method for generating GetManagedObjects() data wrappers
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Code for generating classes suitable for invoking dbus-python methods.
"""
from ._errors import DPClientGenerationError
from ._errors import DPClientRuntimeError
def gmo_reader_builder(spec):
"""
Returns a function that builds a method interface based on 'spec'.
This method interface is a simple one to return the values of
properties from a table generated by a GetManagedObjects() method call
for the object that implements the given interface.
:param spec: the interface specification
:type spec: Element
"""
interface_name = spec.attrib.get('name')
if interface_name is None: # pragma: no cover
raise DPClientGenerationError("No name found for interface.")
def builder(namespace):
"""
The property class's namespace.
:param namespace: the class's namespace
"""
def build_property(name):
"""
Build a single property getter for this class.
:param str name: the property name
:returns: the value of the property
:rtype: object
"""
def dbus_func(self): # pragma: no cover
"""
The property getter.
"""
# pylint: disable=protected-access
try:
return self._table[interface_name][name]
except KeyError:
raise DPClientRuntimeError(
"No entry found for interface %s and property %s" %
(interface_name, name)
)
return dbus_func
for prop in spec.findall('./property'):
name = prop.attrib.get('name')
namespace[name] = build_property(name)
def __init__(self, table):
"""
            The initializer for this class.
"""
self._table = table # pylint: disable=protected-access
namespace['__init__'] = __init__
return builder
|
<commit_before><commit_msg>Add a method for generating GetManagedObjects() data wrappers
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com><commit_after>
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Code for generating classes suitable for invoking dbus-python methods.
"""
from ._errors import DPClientGenerationError
from ._errors import DPClientRuntimeError
def gmo_reader_builder(spec):
"""
Returns a function that builds a method interface based on 'spec'.
This method interface is a simple one to return the values of
properties from a table generated by a GetManagedObjects() method call
for the object that implements the given interface.
:param spec: the interface specification
:type spec: Element
"""
interface_name = spec.attrib.get('name')
if interface_name is None: # pragma: no cover
raise DPClientGenerationError("No name found for interface.")
def builder(namespace):
"""
The property class's namespace.
:param namespace: the class's namespace
"""
def build_property(name):
"""
Build a single property getter for this class.
:param str name: the property name
:returns: the value of the property
:rtype: object
"""
def dbus_func(self): # pragma: no cover
"""
The property getter.
"""
# pylint: disable=protected-access
try:
return self._table[interface_name][name]
except KeyError:
raise DPClientRuntimeError(
"No entry found for interface %s and property %s" %
(interface_name, name)
)
return dbus_func
for prop in spec.findall('./property'):
name = prop.attrib.get('name')
namespace[name] = build_property(name)
def __init__(self, table):
"""
            The initializer for this class.
"""
self._table = table # pylint: disable=protected-access
namespace['__init__'] = __init__
return builder
|
Add a method for generating GetManagedObjects() data wrappers
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Code for generating classes suitable for invoking dbus-python methods.
"""
from ._errors import DPClientGenerationError
from ._errors import DPClientRuntimeError
def gmo_reader_builder(spec):
"""
Returns a function that builds a method interface based on 'spec'.
This method interface is a simple one to return the values of
properties from a table generated by a GetManagedObjects() method call
for the object that implements the given interface.
:param spec: the interface specification
:type spec: Element
"""
interface_name = spec.attrib.get('name')
if interface_name is None: # pragma: no cover
raise DPClientGenerationError("No name found for interface.")
def builder(namespace):
"""
The property class's namespace.
:param namespace: the class's namespace
"""
def build_property(name):
"""
Build a single property getter for this class.
:param str name: the property name
:returns: the value of the property
:rtype: object
"""
def dbus_func(self): # pragma: no cover
"""
The property getter.
"""
# pylint: disable=protected-access
try:
return self._table[interface_name][name]
except KeyError:
raise DPClientRuntimeError(
"No entry found for interface %s and property %s" %
(interface_name, name)
)
return dbus_func
for prop in spec.findall('./property'):
name = prop.attrib.get('name')
namespace[name] = build_property(name)
def __init__(self, table):
"""
            The initializer for this class.
"""
self._table = table # pylint: disable=protected-access
namespace['__init__'] = __init__
return builder
|
<commit_before><commit_msg>Add a method for generating GetManagedObjects() data wrappers
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com><commit_after># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Code for generating classes suitable for invoking dbus-python methods.
"""
from ._errors import DPClientGenerationError
from ._errors import DPClientRuntimeError
def gmo_reader_builder(spec):
"""
Returns a function that builds a method interface based on 'spec'.
This method interface is a simple one to return the values of
properties from a table generated by a GetManagedObjects() method call
for the object that implements the given interface.
:param spec: the interface specification
:type spec: Element
"""
interface_name = spec.attrib.get('name')
if interface_name is None: # pragma: no cover
raise DPClientGenerationError("No name found for interface.")
def builder(namespace):
"""
The property class's namespace.
:param namespace: the class's namespace
"""
def build_property(name):
"""
Build a single property getter for this class.
:param str name: the property name
:returns: the value of the property
:rtype: object
"""
def dbus_func(self): # pragma: no cover
"""
The property getter.
"""
# pylint: disable=protected-access
try:
return self._table[interface_name][name]
except KeyError:
raise DPClientRuntimeError(
"No entry found for interface %s and property %s" %
(interface_name, name)
)
return dbus_func
for prop in spec.findall('./property'):
name = prop.attrib.get('name')
namespace[name] = build_property(name)
def __init__(self, table):
"""
            The initializer for this class.
"""
self._table = table # pylint: disable=protected-access
namespace['__init__'] = __init__
return builder
|
|
43c112c07c4a990b16276eebded2e33bf92ca174
|
src/trajectory/lemniscate_trajectory.py
|
src/trajectory/lemniscate_trajectory.py
|
#!/usr/bin/env python
from math import sqrt, cos, pi, sin
from .trajectory import Trajectory
class LemniscateTrajectory(Trajectory):
    def __init__(self, radius, period):
        Trajectory.__init__(self)
        self.radius = radius
        self.period = period
def get_position_at(self, t):
super(LemniscateTrajectory, self).get_position_at(t)
self.position.x = 2 * sqrt(2) * cos(2 * pi* t / self.period) / (sin(2 * pi * t / self.period) ** 2 + 1)
self.position.y = 2 * sqrt(2) * sin(2 * pi* t / self.period) * cos(2 * pi* t / self.period) / (sin(2 * pi * t / self.period) ** 2 + 1)
return self.position
|
Implement Lemniscate of Bernoulli trajectory
|
feat: Implement Lemniscate of Bernoulli trajectory
|
Python
|
mit
|
bit0001/trajectory_tracking,bit0001/trajectory_tracking
|
feat: Implement Lemniscate of Bernoulli trajectory
|
#!/usr/bin/env python
from math import sqrt, cos, pi, sin
from .trajectory import Trajectory
class LemniscateTrajectory(Trajectory):
    def __init__(self, radius, period):
        Trajectory.__init__(self)
        self.radius = radius
        self.period = period
def get_position_at(self, t):
super(LemniscateTrajectory, self).get_position_at(t)
self.position.x = 2 * sqrt(2) * cos(2 * pi* t / self.period) / (sin(2 * pi * t / self.period) ** 2 + 1)
self.position.y = 2 * sqrt(2) * sin(2 * pi* t / self.period) * cos(2 * pi* t / self.period) / (sin(2 * pi * t / self.period) ** 2 + 1)
return self.position
|
<commit_before><commit_msg>feat: Implement Lemniscate of Bernoulli trajectory<commit_after>
|
#!/usr/bin/env python
from math import sqrt, cos, pi, sin
from .trajectory import Trajectory
class LemniscateTrajectory(Trajectory):
    def __init__(self, radius, period):
        Trajectory.__init__(self)
        self.radius = radius
        self.period = period
def get_position_at(self, t):
super(LemniscateTrajectory, self).get_position_at(t)
self.position.x = 2 * sqrt(2) * cos(2 * pi* t / self.period) / (sin(2 * pi * t / self.period) ** 2 + 1)
self.position.y = 2 * sqrt(2) * sin(2 * pi* t / self.period) * cos(2 * pi* t / self.period) / (sin(2 * pi * t / self.period) ** 2 + 1)
return self.position
|
feat: Implement Lemniscate of Bernoulli trajectory#!/usr/bin/env python
from math import sqrt, cos, pi, sin
from .trajectory import Trajectory
class LemniscateTrajectory(Trajectory):
    def __init__(self, radius, period):
        Trajectory.__init__(self)
        self.radius = radius
        self.period = period
def get_position_at(self, t):
super(LemniscateTrajectory, self).get_position_at(t)
self.position.x = 2 * sqrt(2) * cos(2 * pi* t / self.period) / (sin(2 * pi * t / self.period) ** 2 + 1)
self.position.y = 2 * sqrt(2) * sin(2 * pi* t / self.period) * cos(2 * pi* t / self.period) / (sin(2 * pi * t / self.period) ** 2 + 1)
return self.position
|
<commit_before><commit_msg>feat: Implement Lemniscate of Bernoulli trajectory<commit_after>#!/usr/bin/env python
from math import sqrt, cos, pi, sin
from .trajectory import Trajectory
class LemniscateTrajectory(Trajectory):
    def __init__(self, radius, period):
        Trajectory.__init__(self)
        self.radius = radius
        self.period = period
def get_position_at(self, t):
super(LemniscateTrajectory, self).get_position_at(t)
self.position.x = 2 * sqrt(2) * cos(2 * pi* t / self.period) / (sin(2 * pi * t / self.period) ** 2 + 1)
self.position.y = 2 * sqrt(2) * sin(2 * pi* t / self.period) * cos(2 * pi* t / self.period) / (sin(2 * pi * t / self.period) ** 2 + 1)
return self.position
|
|
fca96bc38abdfbc075b4dd0e85d3cd48be30f9bc
|
CodeFights/circleOfNumbers.py
|
CodeFights/circleOfNumbers.py
|
#!/usr/local/bin/python
# Code Fights Circle of Numbers Problem
def circleOfNumbers(n, firstNumber):
pass
def main():
tests = [
["crazy", "dsbaz"],
["z", "a"]
]
for t in tests:
res = circleOfNumbers(t[0], t[1])
if t[2] == res:
print("PASSED: circleOfNumbers({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: circleOfNumbers({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
Set up Code Fights circle of numbers problem
|
Set up Code Fights circle of numbers problem
|
Python
|
mit
|
HKuz/Test_Code
|
Set up Code Fights circle of numbers problem
|
#!/usr/local/bin/python
# Code Fights Circle of Numbers Problem
def circleOfNumbers(n, firstNumber):
pass
def main():
tests = [
["crazy", "dsbaz"],
["z", "a"]
]
for t in tests:
res = circleOfNumbers(t[0], t[1])
if t[2] == res:
print("PASSED: circleOfNumbers({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: circleOfNumbers({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Set up Code Fights circle of numbers problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Circle of Numbers Problem
def circleOfNumbers(n, firstNumber):
pass
def main():
tests = [
["crazy", "dsbaz"],
["z", "a"]
]
for t in tests:
res = circleOfNumbers(t[0], t[1])
if t[2] == res:
print("PASSED: circleOfNumbers({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: circleOfNumbers({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
Set up Code Fights circle of numbers problem#!/usr/local/bin/python
# Code Fights Circle of Numbers Problem
def circleOfNumbers(n, firstNumber):
pass
def main():
tests = [
["crazy", "dsbaz"],
["z", "a"]
]
for t in tests:
res = circleOfNumbers(t[0], t[1])
if t[2] == res:
print("PASSED: circleOfNumbers({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: circleOfNumbers({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Set up Code Fights circle of numbers problem<commit_after>#!/usr/local/bin/python
# Code Fights Circle of Numbers Problem
def circleOfNumbers(n, firstNumber):
pass
def main():
tests = [
["crazy", "dsbaz"],
["z", "a"]
]
for t in tests:
res = circleOfNumbers(t[0], t[1])
if t[2] == res:
print("PASSED: circleOfNumbers({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: circleOfNumbers({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
|
221f949958e3ec0a73a90f3db9a451b743797761
|
tests/client/test_encryption_client.py
|
tests/client/test_encryption_client.py
|
from __future__ import unicode_literals
import os
import unittest
from qingstor.sdk.client.encryption_client import EncryptionClient
RIGHT_OUT_KWARGS = {
"content_type": "video",
"x_qs_encryption_customer_algorithm": "AES256",
"x_qs_encryption_customer_key":
"MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDA=",
"x_qs_encryption_customer_key_md5": "zZ5FnqcIqUjVwvWmyog4zw=="
}
class TestEncryptionFileChunk(unittest.TestCase):
def test_apply_encryption_headers(self):
ec = EncryptionClient(
"test_config", "test_client", "test_bucket", "test_zone", b"0" * 32
)
out_kwargs = ec.apply_encrypt_headers(content_type="video")
self.assertEqual(out_kwargs, RIGHT_OUT_KWARGS)
if __name__ == '__main__':
unittest.main()
|
Create test class for encryption client
|
Create test class for encryption client
|
Python
|
apache-2.0
|
yunify/qingstor-sdk-python
|
Create test class for encryption client
|
from __future__ import unicode_literals
import os
import unittest
from qingstor.sdk.client.encryption_client import EncryptionClient
RIGHT_OUT_KWARGS = {
"content_type": "video",
"x_qs_encryption_customer_algorithm": "AES256",
"x_qs_encryption_customer_key":
"MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDA=",
"x_qs_encryption_customer_key_md5": "zZ5FnqcIqUjVwvWmyog4zw=="
}
class TestEncryptionFileChunk(unittest.TestCase):
def test_apply_encryption_headers(self):
ec = EncryptionClient(
"test_config", "test_client", "test_bucket", "test_zone", b"0" * 32
)
out_kwargs = ec.apply_encrypt_headers(content_type="video")
self.assertEqual(out_kwargs, RIGHT_OUT_KWARGS)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Create test class for encryption client<commit_after>
|
from __future__ import unicode_literals
import os
import unittest
from qingstor.sdk.client.encryption_client import EncryptionClient
RIGHT_OUT_KWARGS = {
"content_type": "video",
"x_qs_encryption_customer_algorithm": "AES256",
"x_qs_encryption_customer_key":
"MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDA=",
"x_qs_encryption_customer_key_md5": "zZ5FnqcIqUjVwvWmyog4zw=="
}
class TestEncryptionFileChunk(unittest.TestCase):
def test_apply_encryption_headers(self):
ec = EncryptionClient(
"test_config", "test_client", "test_bucket", "test_zone", b"0" * 32
)
out_kwargs = ec.apply_encrypt_headers(content_type="video")
self.assertEqual(out_kwargs, RIGHT_OUT_KWARGS)
if __name__ == '__main__':
unittest.main()
|
Create test class for encryption clientfrom __future__ import unicode_literals
import os
import unittest
from qingstor.sdk.client.encryption_client import EncryptionClient
RIGHT_OUT_KWARGS = {
"content_type": "video",
"x_qs_encryption_customer_algorithm": "AES256",
"x_qs_encryption_customer_key":
"MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDA=",
"x_qs_encryption_customer_key_md5": "zZ5FnqcIqUjVwvWmyog4zw=="
}
class TestEncryptionFileChunk(unittest.TestCase):
def test_apply_encryption_headers(self):
ec = EncryptionClient(
"test_config", "test_client", "test_bucket", "test_zone", b"0" * 32
)
out_kwargs = ec.apply_encrypt_headers(content_type="video")
self.assertEqual(out_kwargs, RIGHT_OUT_KWARGS)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Create test class for encryption client<commit_after>from __future__ import unicode_literals
import os
import unittest
from qingstor.sdk.client.encryption_client import EncryptionClient
RIGHT_OUT_KWARGS = {
"content_type": "video",
"x_qs_encryption_customer_algorithm": "AES256",
"x_qs_encryption_customer_key":
"MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDA=",
"x_qs_encryption_customer_key_md5": "zZ5FnqcIqUjVwvWmyog4zw=="
}
class TestEncryptionFileChunk(unittest.TestCase):
def test_apply_encryption_headers(self):
ec = EncryptionClient(
"test_config", "test_client", "test_bucket", "test_zone", b"0" * 32
)
out_kwargs = ec.apply_encrypt_headers(content_type="video")
self.assertEqual(out_kwargs, RIGHT_OUT_KWARGS)
if __name__ == '__main__':
unittest.main()
|
|
5012e2b28920ff96f8ed14911866818a0ba032fa
|
tests/test_dft.py
|
tests/test_dft.py
|
import unittest
import numpy as np
from pyquante2.dft.functionals import xs,cvwn
from pyquante2.dft.reference import data
class test_dft(unittest.TestCase):
def test_xs(self):
na = data['xs'][:,0]
nb = data['xs'][:,1]
fa,dfa = xs(na)
fb,dfb = xs(nb)
max_f = np.max(fa+fb-data['xs'][:,5])
max_dfa = np.max(dfa-data['xs'][:,6])
max_dfb = np.max(dfb-data['xs'][:,7])
self.assertAlmostEqual(max_f,0,5) ## Fix this!
self.assertAlmostEqual(max_dfa,0)
self.assertAlmostEqual(max_dfb,0)
def test_cvwn(self):
na = data['xs'][:,0]
nb = data['xs'][:,1]
f,dfa,dfb = cvwn(na,nb)
max_f = np.max(f-data['cvwn'][:,5])
max_dfa = np.max(dfa-data['cvwn'][:,6])
max_dfb = np.max(dfb-data['cvwn'][:,7])
self.assertAlmostEqual(max_f,0)
self.assertAlmostEqual(max_dfa,0)
#self.assertAlmostEqual(max_dfb,0,6) ## Fix this!
def runsuite(verbose=True):
if verbose: verbosity=2
else: verbosity=1
suite = unittest.TestLoader().loadTestsFromTestCase(test_dft)
unittest.TextTestRunner(verbosity=verbosity).run(suite)
return
def debugsuite():
import cProfile,pstats
cProfile.run('runsuite()','prof')
prof = pstats.Stats('prof')
prof.strip_dirs().sort_stats('time').print_stats(15)
if __name__ == '__main__':
import sys
if "-d" in sys.argv:
debugsuite()
else:
runsuite()
|
Test suite for lda functionals
|
Test suite for lda functionals
|
Python
|
bsd-3-clause
|
Konjkov/pyquante2,Konjkov/pyquante2,Konjkov/pyquante2
|
Test suite for lda functionals
|
import unittest
import numpy as np
from pyquante2.dft.functionals import xs,cvwn
from pyquante2.dft.reference import data
class test_dft(unittest.TestCase):
def test_xs(self):
na = data['xs'][:,0]
nb = data['xs'][:,1]
fa,dfa = xs(na)
fb,dfb = xs(nb)
max_f = np.max(fa+fb-data['xs'][:,5])
max_dfa = np.max(dfa-data['xs'][:,6])
max_dfb = np.max(dfb-data['xs'][:,7])
self.assertAlmostEqual(max_f,0,5) ## Fix this!
self.assertAlmostEqual(max_dfa,0)
self.assertAlmostEqual(max_dfb,0)
def test_cvwn(self):
na = data['xs'][:,0]
nb = data['xs'][:,1]
f,dfa,dfb = cvwn(na,nb)
max_f = np.max(f-data['cvwn'][:,5])
max_dfa = np.max(dfa-data['cvwn'][:,6])
max_dfb = np.max(dfb-data['cvwn'][:,7])
self.assertAlmostEqual(max_f,0)
self.assertAlmostEqual(max_dfa,0)
#self.assertAlmostEqual(max_dfb,0,6) ## Fix this!
def runsuite(verbose=True):
if verbose: verbosity=2
else: verbosity=1
suite = unittest.TestLoader().loadTestsFromTestCase(test_dft)
unittest.TextTestRunner(verbosity=verbosity).run(suite)
return
def debugsuite():
import cProfile,pstats
cProfile.run('runsuite()','prof')
prof = pstats.Stats('prof')
prof.strip_dirs().sort_stats('time').print_stats(15)
if __name__ == '__main__':
import sys
if "-d" in sys.argv:
debugsuite()
else:
runsuite()
|
<commit_before><commit_msg>Test suite for lda functionals<commit_after>
|
import unittest
import numpy as np
from pyquante2.dft.functionals import xs,cvwn
from pyquante2.dft.reference import data
class test_dft(unittest.TestCase):
def test_xs(self):
na = data['xs'][:,0]
nb = data['xs'][:,1]
fa,dfa = xs(na)
fb,dfb = xs(nb)
max_f = np.max(fa+fb-data['xs'][:,5])
max_dfa = np.max(dfa-data['xs'][:,6])
max_dfb = np.max(dfb-data['xs'][:,7])
self.assertAlmostEqual(max_f,0,5) ## Fix this!
self.assertAlmostEqual(max_dfa,0)
self.assertAlmostEqual(max_dfb,0)
def test_cvwn(self):
na = data['xs'][:,0]
nb = data['xs'][:,1]
f,dfa,dfb = cvwn(na,nb)
max_f = np.max(f-data['cvwn'][:,5])
max_dfa = np.max(dfa-data['cvwn'][:,6])
max_dfb = np.max(dfb-data['cvwn'][:,7])
self.assertAlmostEqual(max_f,0)
self.assertAlmostEqual(max_dfa,0)
#self.assertAlmostEqual(max_dfb,0,6) ## Fix this!
def runsuite(verbose=True):
if verbose: verbosity=2
else: verbosity=1
suite = unittest.TestLoader().loadTestsFromTestCase(test_dft)
unittest.TextTestRunner(verbosity=verbosity).run(suite)
return
def debugsuite():
import cProfile,pstats
cProfile.run('runsuite()','prof')
prof = pstats.Stats('prof')
prof.strip_dirs().sort_stats('time').print_stats(15)
if __name__ == '__main__':
import sys
if "-d" in sys.argv:
debugsuite()
else:
runsuite()
|
Test suite for lda functionalsimport unittest
import numpy as np
from pyquante2.dft.functionals import xs,cvwn
from pyquante2.dft.reference import data
class test_dft(unittest.TestCase):
def test_xs(self):
na = data['xs'][:,0]
nb = data['xs'][:,1]
fa,dfa = xs(na)
fb,dfb = xs(nb)
max_f = np.max(fa+fb-data['xs'][:,5])
max_dfa = np.max(dfa-data['xs'][:,6])
max_dfb = np.max(dfb-data['xs'][:,7])
self.assertAlmostEqual(max_f,0,5) ## Fix this!
self.assertAlmostEqual(max_dfa,0)
self.assertAlmostEqual(max_dfb,0)
def test_cvwn(self):
na = data['xs'][:,0]
nb = data['xs'][:,1]
f,dfa,dfb = cvwn(na,nb)
max_f = np.max(f-data['cvwn'][:,5])
max_dfa = np.max(dfa-data['cvwn'][:,6])
max_dfb = np.max(dfb-data['cvwn'][:,7])
self.assertAlmostEqual(max_f,0)
self.assertAlmostEqual(max_dfa,0)
#self.assertAlmostEqual(max_dfb,0,6) ## Fix this!
def runsuite(verbose=True):
if verbose: verbosity=2
else: verbosity=1
suite = unittest.TestLoader().loadTestsFromTestCase(test_dft)
unittest.TextTestRunner(verbosity=verbosity).run(suite)
return
def debugsuite():
import cProfile,pstats
cProfile.run('runsuite()','prof')
prof = pstats.Stats('prof')
prof.strip_dirs().sort_stats('time').print_stats(15)
if __name__ == '__main__':
import sys
if "-d" in sys.argv:
debugsuite()
else:
runsuite()
|
<commit_before><commit_msg>Test suite for lda functionals<commit_after>import unittest
import numpy as np
from pyquante2.dft.functionals import xs,cvwn
from pyquante2.dft.reference import data
class test_dft(unittest.TestCase):
def test_xs(self):
na = data['xs'][:,0]
nb = data['xs'][:,1]
fa,dfa = xs(na)
fb,dfb = xs(nb)
max_f = np.max(fa+fb-data['xs'][:,5])
max_dfa = np.max(dfa-data['xs'][:,6])
max_dfb = np.max(dfb-data['xs'][:,7])
self.assertAlmostEqual(max_f,0,5) ## Fix this!
self.assertAlmostEqual(max_dfa,0)
self.assertAlmostEqual(max_dfb,0)
def test_cvwn(self):
na = data['xs'][:,0]
nb = data['xs'][:,1]
f,dfa,dfb = cvwn(na,nb)
max_f = np.max(f-data['cvwn'][:,5])
max_dfa = np.max(dfa-data['cvwn'][:,6])
max_dfb = np.max(dfb-data['cvwn'][:,7])
self.assertAlmostEqual(max_f,0)
self.assertAlmostEqual(max_dfa,0)
#self.assertAlmostEqual(max_dfb,0,6) ## Fix this!
def runsuite(verbose=True):
if verbose: verbosity=2
else: verbosity=1
suite = unittest.TestLoader().loadTestsFromTestCase(test_dft)
unittest.TextTestRunner(verbosity=verbosity).run(suite)
return
def debugsuite():
import cProfile,pstats
cProfile.run('runsuite()','prof')
prof = pstats.Stats('prof')
prof.strip_dirs().sort_stats('time').print_stats(15)
if __name__ == '__main__':
import sys
if "-d" in sys.argv:
debugsuite()
else:
runsuite()
|
|
98877b442080c1a653192da71e6c582a67643e97
|
tools/data/box_proto_from_proposals.py
|
tools/data/box_proto_from_proposals.py
|
#!/usr/bin/env python
import os, sys
import pdb
import argparse
import scipy.io as sio
import h5py
sys.path.insert(1, '.')
from vdetlib.utils.protocol import proto_dump, path_to_index, proto_load
def save_if_not_exist(proto, path):
if not os.path.isfile(path):
        proto_dump(proto, path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('proposal_file')
parser.add_argument('vid_root')
parser.add_argument('save_root')
args = parser.parse_args()
if not os.path.isdir(args.save_root):
os.makedirs(args.save_root)
mat_file = sio.loadmat(args.proposal_file)
image_names = mat_file['images']
all_boxes = mat_file['boxes']
cur_vid_name = None
for [[image_name]], [boxes] in zip(image_names, all_boxes):
parts = image_name.split('/')
if len(parts) == 3:
subset, video_name, frame_name = parts
elif len(parts) == 4:
__, subset, video_name, frame_name = parts
else:
raise ValueError('image name has {} components: {}'.format(
len(parts), image_name))
# start a new video
if cur_vid_name != video_name:
if cur_vid_name is not None:
print "Saving {}...".format(cur_vid_name)
save_if_not_exist(box_proto,
os.path.join(args.save_root, cur_vid_name+'.box'))
print "Processsing {}...".format(video_name)
box_proto = {}
box_proto['video'] = video_name
box_proto['boxes'] = []
cur_vid_name = video_name
# read vid_proto
vid_proto = proto_load(
os.path.join(args.vid_root, cur_vid_name+'.vid'))
# process boxes
frame_idx = path_to_index(vid_proto, frame_name)
for box in boxes:
# pdb.set_trace()
box_proto['boxes'].append(
{
"frame": frame_idx,
"bbox": box[0:4].tolist(),
"positive": True if box[4] == 1 else False
}
)
# save last proto
save_if_not_exist(box_proto,
os.path.join(args.save_root, cur_vid_name+'.box.gz'))
|
Add script to generate box proto files from proposal mat files.
|
Add script to generate box proto files from proposal mat files.
|
Python
|
mit
|
myfavouritekk/TPN
|
Add script to generate box proto files from proposal mat files.
|
#!/usr/bin/env python
import os, sys
import pdb
import argparse
import scipy.io as sio
import h5py
sys.path.insert(1, '.')
from vdetlib.utils.protocol import proto_dump, path_to_index, proto_load
def save_if_not_exist(proto, path):
if not os.path.isfile(path):
        proto_dump(proto, path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('proposal_file')
parser.add_argument('vid_root')
parser.add_argument('save_root')
args = parser.parse_args()
if not os.path.isdir(args.save_root):
os.makedirs(args.save_root)
mat_file = sio.loadmat(args.proposal_file)
image_names = mat_file['images']
all_boxes = mat_file['boxes']
cur_vid_name = None
for [[image_name]], [boxes] in zip(image_names, all_boxes):
parts = image_name.split('/')
if len(parts) == 3:
subset, video_name, frame_name = parts
elif len(parts) == 4:
__, subset, video_name, frame_name = parts
else:
raise ValueError('image name has {} components: {}'.format(
len(parts), image_name))
# start a new video
if cur_vid_name != video_name:
if cur_vid_name is not None:
print "Saving {}...".format(cur_vid_name)
save_if_not_exist(box_proto,
os.path.join(args.save_root, cur_vid_name+'.box'))
print "Processsing {}...".format(video_name)
box_proto = {}
box_proto['video'] = video_name
box_proto['boxes'] = []
cur_vid_name = video_name
# read vid_proto
vid_proto = proto_load(
os.path.join(args.vid_root, cur_vid_name+'.vid'))
# process boxes
frame_idx = path_to_index(vid_proto, frame_name)
for box in boxes:
# pdb.set_trace()
box_proto['boxes'].append(
{
"frame": frame_idx,
"bbox": box[0:4].tolist(),
"positive": True if box[4] == 1 else False
}
)
# save last proto
save_if_not_exist(box_proto,
os.path.join(args.save_root, cur_vid_name+'.box.gz'))
|
<commit_before><commit_msg>Add script to generate box proto files from proposal mat files.<commit_after>
|
#!/usr/bin/env python
import os, sys
import pdb
import argparse
import scipy.io as sio
import h5py
sys.path.insert(1, '.')
from vdetlib.utils.protocol import proto_dump, path_to_index, proto_load
def save_if_not_exist(proto, path):
if not os.path.isfile(path):
        proto_dump(proto, path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('proposal_file')
parser.add_argument('vid_root')
parser.add_argument('save_root')
args = parser.parse_args()
if not os.path.isdir(args.save_root):
os.makedirs(args.save_root)
mat_file = sio.loadmat(args.proposal_file)
image_names = mat_file['images']
all_boxes = mat_file['boxes']
cur_vid_name = None
for [[image_name]], [boxes] in zip(image_names, all_boxes):
parts = image_name.split('/')
if len(parts) == 3:
subset, video_name, frame_name = parts
elif len(parts) == 4:
__, subset, video_name, frame_name = parts
else:
raise ValueError('image name has {} components: {}'.format(
len(parts), image_name))
# start a new video
if cur_vid_name != video_name:
if cur_vid_name is not None:
print "Saving {}...".format(cur_vid_name)
save_if_not_exist(box_proto,
os.path.join(args.save_root, cur_vid_name+'.box'))
print "Processsing {}...".format(video_name)
box_proto = {}
box_proto['video'] = video_name
box_proto['boxes'] = []
cur_vid_name = video_name
# read vid_proto
vid_proto = proto_load(
os.path.join(args.vid_root, cur_vid_name+'.vid'))
# process boxes
frame_idx = path_to_index(vid_proto, frame_name)
for box in boxes:
# pdb.set_trace()
box_proto['boxes'].append(
{
"frame": frame_idx,
"bbox": box[0:4].tolist(),
"positive": True if box[4] == 1 else False
}
)
# save last proto
save_if_not_exist(box_proto,
os.path.join(args.save_root, cur_vid_name+'.box.gz'))
|
Add script to generate box proto files from proposal mat files.#!/usr/bin/env python
import os, sys
import pdb
import argparse
import scipy.io as sio
import h5py
sys.path.insert(1, '.')
from vdetlib.utils.protocol import proto_dump, path_to_index, proto_load
def save_if_not_exist(proto, path):
if not os.path.isfile(path):
        proto_dump(proto, path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('proposal_file')
parser.add_argument('vid_root')
parser.add_argument('save_root')
args = parser.parse_args()
if not os.path.isdir(args.save_root):
os.makedirs(args.save_root)
mat_file = sio.loadmat(args.proposal_file)
image_names = mat_file['images']
all_boxes = mat_file['boxes']
cur_vid_name = None
for [[image_name]], [boxes] in zip(image_names, all_boxes):
parts = image_name.split('/')
if len(parts) == 3:
subset, video_name, frame_name = parts
elif len(parts) == 4:
__, subset, video_name, frame_name = parts
else:
raise ValueError('image name has {} components: {}'.format(
len(parts), image_name))
# start a new video
if cur_vid_name != video_name:
if cur_vid_name is not None:
print "Saving {}...".format(cur_vid_name)
save_if_not_exist(box_proto,
os.path.join(args.save_root, cur_vid_name+'.box'))
print "Processsing {}...".format(video_name)
box_proto = {}
box_proto['video'] = video_name
box_proto['boxes'] = []
cur_vid_name = video_name
# read vid_proto
vid_proto = proto_load(
os.path.join(args.vid_root, cur_vid_name+'.vid'))
# process boxes
frame_idx = path_to_index(vid_proto, frame_name)
for box in boxes:
# pdb.set_trace()
box_proto['boxes'].append(
{
"frame": frame_idx,
"bbox": box[0:4].tolist(),
"positive": True if box[4] == 1 else False
}
)
# save last proto
save_if_not_exist(box_proto,
os.path.join(args.save_root, cur_vid_name+'.box.gz'))
|
<commit_before><commit_msg>Add script to generate box proto files from proposal mat files.<commit_after>#!/usr/bin/env python
import os, sys
import pdb
import argparse
import scipy.io as sio
import h5py
sys.path.insert(1, '.')
from vdetlib.utils.protocol import proto_dump, path_to_index, proto_load
def save_if_not_exist(proto, path):
if not os.path.isfile(path):
        proto_dump(proto, path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('proposal_file')
parser.add_argument('vid_root')
parser.add_argument('save_root')
args = parser.parse_args()
if not os.path.isdir(args.save_root):
os.makedirs(args.save_root)
mat_file = sio.loadmat(args.proposal_file)
image_names = mat_file['images']
all_boxes = mat_file['boxes']
cur_vid_name = None
for [[image_name]], [boxes] in zip(image_names, all_boxes):
parts = image_name.split('/')
if len(parts) == 3:
subset, video_name, frame_name = parts
elif len(parts) == 4:
__, subset, video_name, frame_name = parts
else:
raise ValueError('image name has {} components: {}'.format(
len(parts), image_name))
# start a new video
if cur_vid_name != video_name:
if cur_vid_name is not None:
print "Saving {}...".format(cur_vid_name)
save_if_not_exist(box_proto,
os.path.join(args.save_root, cur_vid_name+'.box'))
print "Processsing {}...".format(video_name)
box_proto = {}
box_proto['video'] = video_name
box_proto['boxes'] = []
cur_vid_name = video_name
# read vid_proto
vid_proto = proto_load(
os.path.join(args.vid_root, cur_vid_name+'.vid'))
# process boxes
frame_idx = path_to_index(vid_proto, frame_name)
for box in boxes:
# pdb.set_trace()
box_proto['boxes'].append(
{
"frame": frame_idx,
"bbox": box[0:4].tolist(),
"positive": True if box[4] == 1 else False
}
)
# save last proto
save_if_not_exist(box_proto,
os.path.join(args.save_root, cur_vid_name+'.box.gz'))
|
|
ebb07da268eb57d1b8145eb2f7312b412c817217
|
tests/unit/asyncio/test_asyncio_repr.py
|
tests/unit/asyncio/test_asyncio_repr.py
|
from butter.asyncio.eventfd import Eventfd_async
from butter.asyncio.fanotify import Fanotify_async
from butter.asyncio.inotify import Inotify_async
from butter.asyncio.signalfd import Signalfd_async
from butter.asyncio.timerfd import Timerfd_async
from collections import namedtuple
import pytest
import sys
class Mock_fd_obj(object):
def __init__(self, fd):
self._fd = fd
@pytest.fixture(params=[(Eventfd_async, '_eventfd' ),
(Fanotify_async, '_fanotify'),
(Inotify_async, '_inotify' ),
(Signalfd_async, '_signalfd'),
(Timerfd_async, '_timerfd' )])
def obj(request):
Obj, sub_obj_name = request.param
o = Obj.__new__(Obj)
o._value = 3 # needed for eventfd
sub_obj = Mock_fd_obj(1) #fd=1
setattr(o, sub_obj_name, sub_obj)
return o
@pytest.fixture(params=[(Eventfd_async, '_eventfd' ),
(Fanotify_async, '_fanotify'),
(Inotify_async, '_inotify' ),
(Signalfd_async, '_signalfd'),
(Timerfd_async, '_timerfd' )])
def obj_closed(request):
Obj, sub_obj_name = request.param
o = Obj.__new__(Obj)
o._value = 3 # needed for eventfd
sub_obj = Mock_fd_obj(None)
setattr(o, sub_obj_name, sub_obj)
return o
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_name(obj):
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_fd(obj):
    assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_fd_closed(obj_closed):
assert 'fd=closed' in repr(obj_closed), "Instance does not indicate it is closed"
|
Test async objects for __repr__ as well
|
Test async objects for __repr__ as well
|
Python
|
bsd-3-clause
|
dasSOZO/python-butter,wdv4758h/butter
|
Test async objects for __repr__ as well
|
from butter.asyncio.eventfd import Eventfd_async
from butter.asyncio.fanotify import Fanotify_async
from butter.asyncio.inotify import Inotify_async
from butter.asyncio.signalfd import Signalfd_async
from butter.asyncio.timerfd import Timerfd_async
from collections import namedtuple
import pytest
import sys
class Mock_fd_obj(object):
def __init__(self, fd):
self._fd = fd
@pytest.fixture(params=[(Eventfd_async, '_eventfd' ),
(Fanotify_async, '_fanotify'),
(Inotify_async, '_inotify' ),
(Signalfd_async, '_signalfd'),
(Timerfd_async, '_timerfd' )])
def obj(request):
Obj, sub_obj_name = request.param
o = Obj.__new__(Obj)
o._value = 3 # needed for eventfd
sub_obj = Mock_fd_obj(1) #fd=1
setattr(o, sub_obj_name, sub_obj)
return o
@pytest.fixture(params=[(Eventfd_async, '_eventfd' ),
(Fanotify_async, '_fanotify'),
(Inotify_async, '_inotify' ),
(Signalfd_async, '_signalfd'),
(Timerfd_async, '_timerfd' )])
def obj_closed(request):
Obj, sub_obj_name = request.param
o = Obj.__new__(Obj)
o._value = 3 # needed for eventfd
sub_obj = Mock_fd_obj(None)
setattr(o, sub_obj_name, sub_obj)
return o
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_name(obj):
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_fd(obj):
    assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_fd_closed(obj_closed):
assert 'fd=closed' in repr(obj_closed), "Instance does not indicate it is closed"
|
<commit_before><commit_msg>Test async objects for __repr__ as well<commit_after>
|
from butter.asyncio.eventfd import Eventfd_async
from butter.asyncio.fanotify import Fanotify_async
from butter.asyncio.inotify import Inotify_async
from butter.asyncio.signalfd import Signalfd_async
from butter.asyncio.timerfd import Timerfd_async
from collections import namedtuple
import pytest
import sys
class Mock_fd_obj(object):
def __init__(self, fd):
self._fd = fd
@pytest.fixture(params=[(Eventfd_async, '_eventfd' ),
(Fanotify_async, '_fanotify'),
(Inotify_async, '_inotify' ),
(Signalfd_async, '_signalfd'),
(Timerfd_async, '_timerfd' )])
def obj(request):
Obj, sub_obj_name = request.param
o = Obj.__new__(Obj)
o._value = 3 # needed for eventfd
sub_obj = Mock_fd_obj(1) #fd=1
setattr(o, sub_obj_name, sub_obj)
return o
@pytest.fixture(params=[(Eventfd_async, '_eventfd' ),
(Fanotify_async, '_fanotify'),
(Inotify_async, '_inotify' ),
(Signalfd_async, '_signalfd'),
(Timerfd_async, '_timerfd' )])
def obj_closed(request):
Obj, sub_obj_name = request.param
o = Obj.__new__(Obj)
o._value = 3 # needed for eventfd
sub_obj = Mock_fd_obj(None)
setattr(o, sub_obj_name, sub_obj)
return o
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_name(obj):
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_fd(obj):
    assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_fd_closed(obj_closed):
assert 'fd=closed' in repr(obj_closed), "Instance does not indicate it is closed"
|
Test async objects for __repr__ as wellfrom butter.asyncio.eventfd import Eventfd_async
from butter.asyncio.fanotify import Fanotify_async
from butter.asyncio.inotify import Inotify_async
from butter.asyncio.signalfd import Signalfd_async
from butter.asyncio.timerfd import Timerfd_async
from collections import namedtuple
import pytest
import sys
class Mock_fd_obj(object):
def __init__(self, fd):
self._fd = fd
@pytest.fixture(params=[(Eventfd_async, '_eventfd' ),
(Fanotify_async, '_fanotify'),
(Inotify_async, '_inotify' ),
(Signalfd_async, '_signalfd'),
(Timerfd_async, '_timerfd' )])
def obj(request):
Obj, sub_obj_name = request.param
o = Obj.__new__(Obj)
o._value = 3 # needed for eventfd
sub_obj = Mock_fd_obj(1) #fd=1
setattr(o, sub_obj_name, sub_obj)
return o
@pytest.fixture(params=[(Eventfd_async, '_eventfd' ),
(Fanotify_async, '_fanotify'),
(Inotify_async, '_inotify' ),
(Signalfd_async, '_signalfd'),
(Timerfd_async, '_timerfd' )])
def obj_closed(request):
Obj, sub_obj_name = request.param
o = Obj.__new__(Obj)
o._value = 3 # needed for eventfd
sub_obj = Mock_fd_obj(None)
setattr(o, sub_obj_name, sub_obj)
return o
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_name(obj):
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_fd(obj):
    assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_fd_closed(obj_closed):
assert 'fd=closed' in repr(obj_closed), "Instance does not indicate it is closed"
|
<commit_before><commit_msg>Test async objects for __repr__ as well<commit_after>from butter.asyncio.eventfd import Eventfd_async
from butter.asyncio.fanotify import Fanotify_async
from butter.asyncio.inotify import Inotify_async
from butter.asyncio.signalfd import Signalfd_async
from butter.asyncio.timerfd import Timerfd_async
from collections import namedtuple
import pytest
import sys
class Mock_fd_obj(object):
def __init__(self, fd):
self._fd = fd
@pytest.fixture(params=[(Eventfd_async, '_eventfd' ),
(Fanotify_async, '_fanotify'),
(Inotify_async, '_inotify' ),
(Signalfd_async, '_signalfd'),
(Timerfd_async, '_timerfd' )])
def obj(request):
Obj, sub_obj_name = request.param
o = Obj.__new__(Obj)
o._value = 3 # needed for eventfd
sub_obj = Mock_fd_obj(1) #fd=1
setattr(o, sub_obj_name, sub_obj)
return o
@pytest.fixture(params=[(Eventfd_async, '_eventfd' ),
(Fanotify_async, '_fanotify'),
(Inotify_async, '_inotify' ),
(Signalfd_async, '_signalfd'),
(Timerfd_async, '_timerfd' )])
def obj_closed(request):
Obj, sub_obj_name = request.param
o = Obj.__new__(Obj)
o._value = 3 # needed for eventfd
sub_obj = Mock_fd_obj(None)
setattr(o, sub_obj_name, sub_obj)
return o
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_name(obj):
assert obj.__class__.__name__ in repr(obj), "Instance's representation does not contain its own name"
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_fd(obj):
    assert 'fd=1' in repr(obj), "Instance does not list its own fd (used for easy identification)"
@pytest.mark.skipif(sys.version_info < (3,4), reason="requires python3.4/asyncio")
@pytest.mark.repr
@pytest.mark.unit
@pytest.mark.asyncio
def test_repr_fd_closed(obj_closed):
assert 'fd=closed' in repr(obj_closed), "Instance does not indicate it is closed"
|
|
70cad3e6f9b2ceac0ab7082843e6902e1934afe0
|
tohu/v5/extras.py
|
tohu/v5/extras.py
|
from .base import TohuBaseGenerator
from .primitive_generators import Constant
__all__ = ['as_tohu_generator']
def as_tohu_generator(g):
"""
If g is a tohu generator return it unchanged,
otherwise wrap it in a Constant generator.
"""
if isinstance(g, TohuBaseGenerator):
return g
else:
return Constant(g)
|
Add helper function to ensure an input value is a tohu generator
|
Add helper function to ensure an input value is a tohu generator
|
Python
|
mit
|
maxalbert/tohu
|
Add helper function to ensure an input value is a tohu generator
|
from .base import TohuBaseGenerator
from .primitive_generators import Constant
__all__ = ['as_tohu_generator']
def as_tohu_generator(g):
"""
If g is a tohu generator return it unchanged,
otherwise wrap it in a Constant generator.
"""
if isinstance(g, TohuBaseGenerator):
return g
else:
return Constant(g)
|
<commit_before><commit_msg>Add helper function to ensure an input value is a tohu generator<commit_after>
|
from .base import TohuBaseGenerator
from .primitive_generators import Constant
__all__ = ['as_tohu_generator']
def as_tohu_generator(g):
"""
If g is a tohu generator return it unchanged,
otherwise wrap it in a Constant generator.
"""
if isinstance(g, TohuBaseGenerator):
return g
else:
return Constant(g)
|
Add helper function to ensure an input value is a tohu generatorfrom .base import TohuBaseGenerator
from .primitive_generators import Constant
__all__ = ['as_tohu_generator']
def as_tohu_generator(g):
"""
If g is a tohu generator return it unchanged,
otherwise wrap it in a Constant generator.
"""
if isinstance(g, TohuBaseGenerator):
return g
else:
return Constant(g)
|
<commit_before><commit_msg>Add helper function to ensure an input value is a tohu generator<commit_after>from .base import TohuBaseGenerator
from .primitive_generators import Constant
__all__ = ['as_tohu_generator']
def as_tohu_generator(g):
"""
If g is a tohu generator return it unchanged,
otherwise wrap it in a Constant generator.
"""
if isinstance(g, TohuBaseGenerator):
return g
else:
return Constant(g)
|
|
2e2c5d394e57837386d17d24e219cadbd625aa09
|
nbs/utils.py
|
nbs/utils.py
|
# -*- coding: utf-8 -*-
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
_subtypes = {
'int': int,
'str': str,
'u': unicode,
}
def __init__(self, url_map, subtype=None, mutable=False):
super(ListConverter, self).__init__(url_map)
self.subtype = subtype
self.mutable = mutable
if subtype:
rearg = {'int': '\d', 'str': '\w', 'u': '\w'}[subtype]
else:
rearg = '[\d\w]'
self.regex = '{0}+(?:,{0}*)+'.format(rearg)
def to_python(self, value):
retval = filter(None, value.split(','))
if self.subtype in self._subtypes:
retval = map(self._subtypes[self.subtype], retval)
        if not self.mutable:
            retval = tuple(retval)
        return retval
    def to_url(self, values):
        return ','.join(BaseConverter.to_url(self, value) for value in values)
|
Add ListConverter to url parser
|
Add ListConverter to url parser
|
Python
|
mit
|
coyotevz/nobix-app
|
Add ListConverter to url parser
|
# -*- coding: utf-8 -*-
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
_subtypes = {
'int': int,
'str': str,
'u': unicode,
}
def __init__(self, url_map, subtype=None, mutable=False):
super(ListConverter, self).__init__(url_map)
self.subtype = subtype
self.mutable = mutable
if subtype:
rearg = {'int': '\d', 'str': '\w', 'u': '\w'}[subtype]
else:
rearg = '[\d\w]'
self.regex = '{0}+(?:,{0}*)+'.format(rearg)
def to_python(self, value):
retval = filter(None, value.split(','))
if self.subtype in self._subtypes:
retval = map(self._subtypes[self.subtype], retval)
        if not self.mutable:
            retval = tuple(retval)
        return retval
    def to_url(self, values):
        return ','.join(BaseConverter.to_url(self, value) for value in values)
|
<commit_before><commit_msg>Add ListConverter to url parser<commit_after>
|
# -*- coding: utf-8 -*-
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
_subtypes = {
'int': int,
'str': str,
'u': unicode,
}
def __init__(self, url_map, subtype=None, mutable=False):
super(ListConverter, self).__init__(url_map)
self.subtype = subtype
self.mutable = mutable
if subtype:
rearg = {'int': '\d', 'str': '\w', 'u': '\w'}[subtype]
else:
rearg = '[\d\w]'
self.regex = '{0}+(?:,{0}*)+'.format(rearg)
def to_python(self, value):
retval = filter(None, value.split(','))
if self.subtype in self._subtypes:
retval = map(self._subtypes[self.subtype], retval)
        if not self.mutable:
            retval = tuple(retval)
        return retval
    def to_url(self, values):
        return ','.join(BaseConverter.to_url(self, value) for value in values)
|
Add ListConverter to url parser# -*- coding: utf-8 -*-
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
_subtypes = {
'int': int,
'str': str,
'u': unicode,
}
def __init__(self, url_map, subtype=None, mutable=False):
super(ListConverter, self).__init__(url_map)
self.subtype = subtype
self.mutable = mutable
if subtype:
rearg = {'int': '\d', 'str': '\w', 'u': '\w'}[subtype]
else:
rearg = '[\d\w]'
self.regex = '{0}+(?:,{0}*)+'.format(rearg)
def to_python(self, value):
retval = filter(None, value.split(','))
if self.subtype in self._subtypes:
retval = map(self._subtypes[self.subtype], retval)
        if not self.mutable:
            retval = tuple(retval)
        return retval
    def to_url(self, values):
        return ','.join(BaseConverter.to_url(self, value) for value in values)
|
<commit_before><commit_msg>Add ListConverter to url parser<commit_after># -*- coding: utf-8 -*-
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
_subtypes = {
'int': int,
'str': str,
'u': unicode,
}
def __init__(self, url_map, subtype=None, mutable=False):
super(ListConverter, self).__init__(url_map)
self.subtype = subtype
self.mutable = mutable
if subtype:
rearg = {'int': '\d', 'str': '\w', 'u': '\w'}[subtype]
else:
rearg = '[\d\w]'
self.regex = '{0}+(?:,{0}*)+'.format(rearg)
def to_python(self, value):
retval = filter(None, value.split(','))
if self.subtype in self._subtypes:
retval = map(self._subtypes[self.subtype], retval)
        if not self.mutable:
            retval = tuple(retval)
        return retval
    def to_url(self, values):
        return ','.join(BaseConverter.to_url(self, value) for value in values)
|
|
ee2a83e26fc1319e97fe02109bc28a7b061e7a2a
|
tests/test_gen_sql.py
|
tests/test_gen_sql.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class TestGenSql:
def test_gen_drop_statement(self):
pass
def test_create_statement(self):
pass
|
Add tests placeholder for pg_bawler.gen_sql
|
Add tests placeholder for pg_bawler.gen_sql
|
Python
|
bsd-3-clause
|
beezz/pg_bawler,beezz/pg_bawler
|
Add tests placeholder for pg_bawler.gen_sql
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class TestGenSql:
def test_gen_drop_statement(self):
pass
def test_create_statement(self):
pass
|
<commit_before><commit_msg>Add tests placeholder for pg_bawler.gen_sql<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class TestGenSql:
def test_gen_drop_statement(self):
pass
def test_create_statement(self):
pass
|
Add tests placeholder for pg_bawler.gen_sql#!/usr/bin/env python
# -*- coding: utf-8 -*-
class TestGenSql:
def test_gen_drop_statement(self):
pass
def test_create_statement(self):
pass
|
<commit_before><commit_msg>Add tests placeholder for pg_bawler.gen_sql<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
class TestGenSql:
def test_gen_drop_statement(self):
pass
def test_create_statement(self):
pass
|
|
4da185d2902c9a83db1c7022f2e9c65b39957384
|
aerodynamics_2d.py
|
aerodynamics_2d.py
|
"""
This module reads XFLR5 data into numpy arrays.
"""
import numpy as np
# import matplotlib.pyplot as plt
def read_xflr5_data(wing_foil_name):
reynolds_numbers = np.arange(0, 1.01, 0.1)
headers = ["alpha", "CL", "CD", "CDp", "Cm", "Top Xtr", "Bot Xtr", "Cpmin", "Chinge", "XCp"]
tmpdict = {i: [] for i in headers}
for reynolds_number in reynolds_numbers:
data = np.genfromtxt(u"{}\\{:.2f}.txt".format(wing_foil_name, reynolds_number), skip_header=11).transpose()
for header, datum in zip(headers, data):
tmpdict[header].append(datum)
return {i: np.array(tmpdict[i]) for i in tmpdict}
# print tmpdict["Cm"]
# for j in range(len(reynolds_numbers)):
# plt.plot(tmpdict["alpha"][j], tmpdict["Cm"][j], label="Reynolds={}".format(j))
# plt.legend(loc=2, fontsize="x-small")
# plt.show()
|
Add 2D wing foil reader
|
Add 2D wing foil reader
|
Python
|
mit
|
salamann/fpadesigner
|
Add 2D wing foil reader
|
"""
This module converts XFLR5 data to numpy.array.
"""
import numpy as np
# import matplotlib.pyplot as plt
def read_xflr5_data(wing_foil_name):
reynolds_numbers = np.arange(0, 1.01, 0.1)
headers = ["alpha", "CL", "CD", "CDp", "Cm", "Top Xtr", "Bot Xtr", "Cpmin", "Chinge", "XCp"]
tmpdict = {i: [] for i in headers}
for reynolds_number in reynolds_numbers:
data = np.genfromtxt(u"{}\\{:.2f}.txt".format(wing_foil_name, reynolds_number), skip_header=11).transpose()
for header, datum in zip(headers, data):
tmpdict[header].append(datum)
return {i: np.array(tmpdict[i]) for i in tmpdict}
# print tmpdict["Cm"]
# for j in range(len(reynolds_numbers)):
# plt.plot(tmpdict["alpha"][j], tmpdict["Cm"][j], label="Reynolds={}".format(j))
# plt.legend(loc=2, fontsize="x-small")
# plt.show()
|
<commit_before><commit_msg>Add 2D wing foil reader<commit_after>
|
"""
This module converts XFLR5 data to numpy.array.
"""
import numpy as np
# import matplotlib.pyplot as plt
def read_xflr5_data(wing_foil_name):
reynolds_numbers = np.arange(0, 1.01, 0.1)
headers = ["alpha", "CL", "CD", "CDp", "Cm", "Top Xtr", "Bot Xtr", "Cpmin", "Chinge", "XCp"]
tmpdict = {i: [] for i in headers}
for reynolds_number in reynolds_numbers:
data = np.genfromtxt(u"{}\\{:.2f}.txt".format(wing_foil_name, reynolds_number), skip_header=11).transpose()
for header, datum in zip(headers, data):
tmpdict[header].append(datum)
return {i: np.array(tmpdict[i]) for i in tmpdict}
# print tmpdict["Cm"]
# for j in range(len(reynolds_numbers)):
# plt.plot(tmpdict["alpha"][j], tmpdict["Cm"][j], label="Reynolds={}".format(j))
# plt.legend(loc=2, fontsize="x-small")
# plt.show()
|
Add 2D wing foil reader"""
This module converts XFLR5 data to numpy.array.
"""
import numpy as np
# import matplotlib.pyplot as plt
def read_xflr5_data(wing_foil_name):
reynolds_numbers = np.arange(0, 1.01, 0.1)
headers = ["alpha", "CL", "CD", "CDp", "Cm", "Top Xtr", "Bot Xtr", "Cpmin", "Chinge", "XCp"]
tmpdict = {i: [] for i in headers}
for reynolds_number in reynolds_numbers:
data = np.genfromtxt(u"{}\\{:.2f}.txt".format(wing_foil_name, reynolds_number), skip_header=11).transpose()
for header, datum in zip(headers, data):
tmpdict[header].append(datum)
return {i: np.array(tmpdict[i]) for i in tmpdict}
# print tmpdict["Cm"]
# for j in range(len(reynolds_numbers)):
# plt.plot(tmpdict["alpha"][j], tmpdict["Cm"][j], label="Reynolds={}".format(j))
# plt.legend(loc=2, fontsize="x-small")
# plt.show()
|
<commit_before><commit_msg>Add 2D wing foil reader<commit_after>"""
This module converts XFLR5 data to numpy.array.
"""
import numpy as np
# import matplotlib.pyplot as plt
def read_xflr5_data(wing_foil_name):
reynolds_numbers = np.arange(0, 1.01, 0.1)
headers = ["alpha", "CL", "CD", "CDp", "Cm", "Top Xtr", "Bot Xtr", "Cpmin", "Chinge", "XCp"]
tmpdict = {i: [] for i in headers}
for reynolds_number in reynolds_numbers:
data = np.genfromtxt(u"{}\\{:.2f}.txt".format(wing_foil_name, reynolds_number), skip_header=11).transpose()
for header, datum in zip(headers, data):
tmpdict[header].append(datum)
return {i: np.array(tmpdict[i]) for i in tmpdict}
# print tmpdict["Cm"]
# for j in range(len(reynolds_numbers)):
# plt.plot(tmpdict["alpha"][j], tmpdict["Cm"][j], label="Reynolds={}".format(j))
# plt.legend(loc=2, fontsize="x-small")
# plt.show()
|
|
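A hedged usage sketch for the XFLR5 reader above; the folder name "NACA2412" and the presence of 0.00.txt through 1.00.txt polar exports inside it are assumptions, not part of the commit.

# Illustrative plot of lift coefficient vs. angle of attack per Reynolds case.
import matplotlib.pyplot as plt

polars = read_xflr5_data("NACA2412")  # hypothetical folder of XFLR5 exports
for j, alpha in enumerate(polars["alpha"]):
    plt.plot(alpha, polars["CL"][j], label="Re case {}".format(j))
plt.xlabel("alpha [deg]")
plt.ylabel("CL")
plt.legend(loc="best", fontsize="x-small")
plt.show()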
b0360709bd80df9756a41089ac0c6a4eb19109df
|
glance/db/sqlalchemy/migrate_repo/versions/018_add_image_locations_table.py
|
glance/db/sqlalchemy/migrate_repo/versions/018_add_image_locations_table.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from glance.db.sqlalchemy.migrate_repo import schema
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
#NOTE(bcwaldon): load the images table for the ForeignKey below
sqlalchemy.Table('images', meta, autoload=True)
image_locations_table = sqlalchemy.Table(
'image_locations', meta,
sqlalchemy.Column('id',
schema.Integer(),
primary_key=True,
nullable=False),
sqlalchemy.Column('image_id',
schema.String(36),
sqlalchemy.ForeignKey('images.id'),
nullable=False,
index=True),
sqlalchemy.Column('value',
schema.Text(),
nullable=False),
sqlalchemy.Column('created_at',
schema.DateTime(),
nullable=False),
sqlalchemy.Column('updated_at',
schema.DateTime()),
sqlalchemy.Column('deleted_at',
schema.DateTime()),
sqlalchemy.Column('deleted',
schema.Boolean(),
nullable=False,
default=False,
index=True),
)
schema.create_tables([image_locations_table])
def downgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
image_locations_table = sqlalchemy.Table('image_locations', meta,
autoload=True)
schema.drop_tables([image_locations_table])
|
Add migration 18 - create the image_locations table
|
Add migration 18 - create the image_locations table
This migration simply creates the new image_locations table. It does
not yet use the table for anything.
Related to bp multiple-image-locations
Change-Id: I1536c9276bc807aac26849e37556b465e8b5c9eb
|
Python
|
apache-2.0
|
rajalokan/glance,rickerc/glance_audit,ntt-sic/glance,kfwang/Glance-OVA-OVF,sigmavirus24/glance,wkoathp/glance,JioCloud/glance,vuntz/glance,citrix-openstack-build/glance,takeshineshiro/glance,cloudbau/glance,redhat-openstack/glance,tanglei528/glance,ntt-sic/glance,stevelle/glance,saeki-masaki/glance,dims/glance,saeki-masaki/glance,wkoathp/glance,takeshineshiro/glance,vuntz/glance,klmitch/glance,scripnichenko/glance,rickerc/glance_audit,JioCloud/glance,openstack/glance,ozamiatin/glance,dims/glance,paramite/glance,jumpstarter-io/glance,citrix-openstack-build/glance,cloudbau/glance,openstack/glance,sigmavirus24/glance,SUSE-Cloud/glance,tanglei528/glance,jumpstarter-io/glance,rajalokan/glance,redhat-openstack/glance,paramite/glance,akash1808/glance,stevelle/glance,scripnichenko/glance,klmitch/glance,kfwang/Glance-OVA-OVF,darren-wang/gl,akash1808/glance,openstack/glance,ozamiatin/glance,SUSE-Cloud/glance,darren-wang/gl
|
Add migration 18 - create the image_locations table
This migration simply creates the new image_locations table. It does
not yet use the table for anything.
Related to bp multiple-image-locations
Change-Id: I1536c9276bc807aac26849e37556b465e8b5c9eb
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from glance.db.sqlalchemy.migrate_repo import schema
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
#NOTE(bcwaldon): load the images table for the ForeignKey below
sqlalchemy.Table('images', meta, autoload=True)
image_locations_table = sqlalchemy.Table(
'image_locations', meta,
sqlalchemy.Column('id',
schema.Integer(),
primary_key=True,
nullable=False),
sqlalchemy.Column('image_id',
schema.String(36),
sqlalchemy.ForeignKey('images.id'),
nullable=False,
index=True),
sqlalchemy.Column('value',
schema.Text(),
nullable=False),
sqlalchemy.Column('created_at',
schema.DateTime(),
nullable=False),
sqlalchemy.Column('updated_at',
schema.DateTime()),
sqlalchemy.Column('deleted_at',
schema.DateTime()),
sqlalchemy.Column('deleted',
schema.Boolean(),
nullable=False,
default=False,
index=True),
)
schema.create_tables([image_locations_table])
def downgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
image_locations_table = sqlalchemy.Table('image_locations', meta,
autoload=True)
schema.drop_tables([image_locations_table])
|
<commit_before><commit_msg>Add migration 18 - create the image_locations table
This migration simply creates the new image_locations table. It does
not yet use the table for anything.
Related to bp multiple-image-locations
Change-Id: I1536c9276bc807aac26849e37556b465e8b5c9eb<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from glance.db.sqlalchemy.migrate_repo import schema
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
#NOTE(bcwaldon): load the images table for the ForeignKey below
sqlalchemy.Table('images', meta, autoload=True)
image_locations_table = sqlalchemy.Table(
'image_locations', meta,
sqlalchemy.Column('id',
schema.Integer(),
primary_key=True,
nullable=False),
sqlalchemy.Column('image_id',
schema.String(36),
sqlalchemy.ForeignKey('images.id'),
nullable=False,
index=True),
sqlalchemy.Column('value',
schema.Text(),
nullable=False),
sqlalchemy.Column('created_at',
schema.DateTime(),
nullable=False),
sqlalchemy.Column('updated_at',
schema.DateTime()),
sqlalchemy.Column('deleted_at',
schema.DateTime()),
sqlalchemy.Column('deleted',
schema.Boolean(),
nullable=False,
default=False,
index=True),
)
schema.create_tables([image_locations_table])
def downgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
image_locations_table = sqlalchemy.Table('image_locations', meta,
autoload=True)
schema.drop_tables([image_locations_table])
|
Add migration 18 - create the image_locations table
This migration simply creates the new image_locations table. It does
not yet use the table for anything.
Related to bp multiple-image-locations
Change-Id: I1536c9276bc807aac26849e37556b465e8b5c9eb# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from glance.db.sqlalchemy.migrate_repo import schema
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
#NOTE(bcwaldon): load the images table for the ForeignKey below
sqlalchemy.Table('images', meta, autoload=True)
image_locations_table = sqlalchemy.Table(
'image_locations', meta,
sqlalchemy.Column('id',
schema.Integer(),
primary_key=True,
nullable=False),
sqlalchemy.Column('image_id',
schema.String(36),
sqlalchemy.ForeignKey('images.id'),
nullable=False,
index=True),
sqlalchemy.Column('value',
schema.Text(),
nullable=False),
sqlalchemy.Column('created_at',
schema.DateTime(),
nullable=False),
sqlalchemy.Column('updated_at',
schema.DateTime()),
sqlalchemy.Column('deleted_at',
schema.DateTime()),
sqlalchemy.Column('deleted',
schema.Boolean(),
nullable=False,
default=False,
index=True),
)
schema.create_tables([image_locations_table])
def downgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
image_locations_table = sqlalchemy.Table('image_locations', meta,
autoload=True)
schema.drop_tables([image_locations_table])
|
<commit_before><commit_msg>Add migration 18 - create the image_locations table
This migration simply creates the new image_locations table. It does
not yet use the table for anything.
Related to bp multiple-image-locations
Change-Id: I1536c9276bc807aac26849e37556b465e8b5c9eb<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy
from glance.db.sqlalchemy.migrate_repo import schema
def upgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
#NOTE(bcwaldon): load the images table for the ForeignKey below
sqlalchemy.Table('images', meta, autoload=True)
image_locations_table = sqlalchemy.Table(
'image_locations', meta,
sqlalchemy.Column('id',
schema.Integer(),
primary_key=True,
nullable=False),
sqlalchemy.Column('image_id',
schema.String(36),
sqlalchemy.ForeignKey('images.id'),
nullable=False,
index=True),
sqlalchemy.Column('value',
schema.Text(),
nullable=False),
sqlalchemy.Column('created_at',
schema.DateTime(),
nullable=False),
sqlalchemy.Column('updated_at',
schema.DateTime()),
sqlalchemy.Column('deleted_at',
schema.DateTime()),
sqlalchemy.Column('deleted',
schema.Boolean(),
nullable=False,
default=False,
index=True),
)
schema.create_tables([image_locations_table])
def downgrade(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
image_locations_table = sqlalchemy.Table('image_locations', meta,
autoload=True)
schema.drop_tables([image_locations_table])
|
|
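A rough sketch of exercising migration 18 by hand, assuming the SQLAlchemy 0.x-era API the module targets (bound MetaData) and a scratch SQLite engine; in the real repository sqlalchemy-migrate's versioning machinery drives upgrade/downgrade, and earlier migrations create the images table that is faked here.

import sqlalchemy

engine = sqlalchemy.create_engine('sqlite:///:memory:')
meta = sqlalchemy.MetaData(engine)
# Stand-in for the images table that earlier migrations would have created.
sqlalchemy.Table('images', meta,
                 sqlalchemy.Column('id', sqlalchemy.String(36), primary_key=True))
meta.create_all()

upgrade(engine)    # creates image_locations with its FK to images.id
downgrade(engine)  # drops it again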
8e773cb6df43c697e0d088813cf143fff9453624
|
pyQuantuccia/tests/test_quantuccia.py
|
pyQuantuccia/tests/test_quantuccia.py
|
import pytest
from pyQuantuccia import quantuccia
def test_function_fails_correctly_on_bad_input():
"""
If we send in something which isn't three
appropriate ints, something reasonable should
happen.
"""
with pytest.raises(Exception):
quantuccia.united_kingdom_is_business_day(1, 2, 3, 4)
|
Add a test which directly calls the c function.
|
Add a test which directly calls the c function.
|
Python
|
bsd-3-clause
|
jwg4/pyQuantuccia,jwg4/pyQuantuccia
|
Add a test which directly calls the c function.
|
import pytest
from pyQuantuccia import quantuccia
def test_function_fails_correctly_on_bad_input():
"""
If we send in something which isn't three
appropriate ints, something reasonable should
happen.
"""
with pytest.raises(Exception):
quantuccia.united_kingdom_is_business_day(1, 2, 3, 4)
|
<commit_before><commit_msg>Add a test which directly calls the c function.<commit_after>
|
import pytest
from pyQuantuccia import quantuccia
def test_function_fails_correctly_on_bad_input():
"""
If we send in something which isn't three
appropriate ints, something reasonable should
happen.
"""
with pytest.raises(Exception):
quantuccia.united_kingdom_is_business_day(1, 2, 3, 4)
|
Add a test which directly calls the c function.import pytest
from pyQuantuccia import quantuccia
def test_function_fails_correctly_on_bad_input():
"""
If we send in something which isn't three
appropriate ints, something reasonable should
happen.
"""
with pytest.raises(Exception):
quantuccia.united_kingdom_is_business_day(1, 2, 3, 4)
|
<commit_before><commit_msg>Add a test which directly calls the c function.<commit_after>import pytest
from pyQuantuccia import quantuccia
def test_function_fails_correctly_on_bad_input():
"""
If we send in something which isn't three
appropriate ints, something reasonable should
happen.
"""
with pytest.raises(Exception):
quantuccia.united_kingdom_is_business_day(1, 2, 3, 4)
|
|
84ffbc997ae289eba1c8cb0666f05c54c37e5704
|
dmrx_most_heard_n0gsg.py
|
dmrx_most_heard_n0gsg.py
|
#!/usr/bin/env python2
import csv
# dmrx.net uses SSL SNI, which urllib2 doesn't support
import requests
from dmr_marc_users_cs750 import get_groups
MOST_HEARD_URL = 'https://dmrx.net/csvfiles/MostHeard.csv'
# Neither of these formats uses a header row
COLUMNS_N0GSG = ('Call ID', 'Call Alias', 'Call Type', 'Receive Tone')
COLUMNS_DMRX = ('id', 'callsign', 'name')
def read_most_heard_csv(users):
"""Reads DMRX csv from the heard file-like object and returns a list of
dicts in N0GSG export format."""
csvr = csv.DictReader(users, fieldnames=COLUMNS_DMRX)
result = []
for row in csvr:
result.append(dict(zip(COLUMNS_N0GSG, (
row['id'],
' '.join((row['callsign'], row['name'])),
'Private Call', # Call Type
'No', # Receive Tone
))))
return result
def write_n0gsg_csv(contacts, csvo,
fieldnames=COLUMNS_N0GSG, writeheader=False):
"""Writes contacts to the csvo file-like object.
"""
csvw = csv.DictWriter(csvo, fieldnames)
if writeheader:
csvw.writeheader()
for row in contacts:
csvw.writerow(row)
def get_users(db_url=MOST_HEARD_URL):
source = requests.get(db_url, stream=True)
users = read_most_heard_csv(source.raw)
source.close()
return users
if __name__ == '__main__':
users = get_users()
groups = get_groups()
with open('n0gsg-dci-bm-dmrx-most-heard.csv', 'wb') as csvo:
write_n0gsg_csv(groups + users, csvo)
|
Add support for N0GSG csv and DMRX MostHeard.csv
|
Add support for N0GSG csv and DMRX MostHeard.csv
|
Python
|
apache-2.0
|
ajorg/DMR_contacts
|
Add support for N0GSG csv and DMRX MostHeard.csv
|
#!/usr/bin/env python2
import csv
# dmrx.net uses SSL SNI, which urllib2 doesn't support
import requests
from dmr_marc_users_cs750 import get_groups
MOST_HEARD_URL = 'https://dmrx.net/csvfiles/MostHeard.csv'
# Neither of these formats uses a header row
COLUMNS_N0GSG = ('Call ID', 'Call Alias', 'Call Type', 'Receive Tone')
COLUMNS_DMRX = ('id', 'callsign', 'name')
def read_most_heard_csv(users):
"""Reads DMRX csv from the heard file-like object and returns a list of
dicts in N0GSG export format."""
csvr = csv.DictReader(users, fieldnames=COLUMNS_DMRX)
result = []
for row in csvr:
result.append(dict(zip(COLUMNS_N0GSG, (
row['id'],
' '.join((row['callsign'], row['name'])),
'Private Call', # Call Type
'No', # Receive Tone
))))
return result
def write_n0gsg_csv(contacts, csvo,
fieldnames=COLUMNS_N0GSG, writeheader=False):
"""Writes contacts to the csvo file-like object.
"""
csvw = csv.DictWriter(csvo, fieldnames)
if writeheader:
csvw.writeheader()
for row in contacts:
csvw.writerow(row)
def get_users(db_url=MOST_HEARD_URL):
source = requests.get(db_url, stream=True)
users = read_most_heard_csv(source.raw)
source.close()
return users
if __name__ == '__main__':
users = get_users()
groups = get_groups()
with open('n0gsg-dci-bm-dmrx-most-heard.csv', 'wb') as csvo:
write_n0gsg_csv(groups + users, csvo)
|
<commit_before><commit_msg>Add support for N0GSG csv and DMRX MostHeard.csv<commit_after>
|
#!/usr/bin/env python2
import csv
# dmrx.net uses SSL SNI, which urllib2 doesn't support
import requests
from dmr_marc_users_cs750 import get_groups
MOST_HEARD_URL = 'https://dmrx.net/csvfiles/MostHeard.csv'
# Neither of these formats uses a header row
COLUMNS_N0GSG = ('Call ID', 'Call Alias', 'Call Type', 'Receive Tone')
COLUMNS_DMRX = ('id', 'callsign', 'name')
def read_most_heard_csv(users):
"""Reads DMRX csv from the heard file-like object and returns a list of
dicts in N0GSG export format."""
csvr = csv.DictReader(users, fieldnames=COLUMNS_DMRX)
result = []
for row in csvr:
result.append(dict(zip(COLUMNS_N0GSG, (
row['id'],
' '.join((row['callsign'], row['name'])),
'Private Call', # Call Type
'No', # Receive Tone
))))
return result
def write_n0gsg_csv(contacts, csvo,
fieldnames=COLUMNS_N0GSG, writeheader=False):
"""Writes contacts to the csvo file-like object.
"""
csvw = csv.DictWriter(csvo, fieldnames)
if writeheader:
csvw.writeheader()
for row in contacts:
csvw.writerow(row)
def get_users(db_url=MOST_HEARD_URL):
source = requests.get(db_url, stream=True)
users = read_most_heard_csv(source.raw)
source.close()
return users
if __name__ == '__main__':
users = get_users()
groups = get_groups()
with open('n0gsg-dci-bm-dmrx-most-heard.csv', 'wb') as csvo:
write_n0gsg_csv(groups + users, csvo)
|
Add support for N0GSG csv and DMRX MostHeard.csv#!/usr/bin/env python2
import csv
# dmrx.net uses SSL SNI, which urllib2 doesn't support
import requests
from dmr_marc_users_cs750 import get_groups
MOST_HEARD_URL = 'https://dmrx.net/csvfiles/MostHeard.csv'
# Neither of these formats uses a header row
COLUMNS_N0GSG = ('Call ID', 'Call Alias', 'Call Type', 'Receive Tone')
COLUMNS_DMRX = ('id', 'callsign', 'name')
def read_most_heard_csv(users):
"""Reads DMRX csv from the heard file-like object and returns a list of
dicts in N0GSG export format."""
csvr = csv.DictReader(users, fieldnames=COLUMNS_DMRX)
result = []
for row in csvr:
result.append(dict(zip(COLUMNS_N0GSG, (
row['id'],
' '.join((row['callsign'], row['name'])),
'Private Call', # Call Type
'No', # Receive Tone
))))
return result
def write_n0gsg_csv(contacts, csvo,
fieldnames=COLUMNS_N0GSG, writeheader=False):
"""Writes contacts to the csvo file-like object.
"""
csvw = csv.DictWriter(csvo, fieldnames)
if writeheader:
csvw.writeheader()
for row in contacts:
csvw.writerow(row)
def get_users(db_url=MOST_HEARD_URL):
source = requests.get(db_url, stream=True)
users = read_most_heard_csv(source.raw)
source.close()
return users
if __name__ == '__main__':
users = get_users()
groups = get_groups()
with open('n0gsg-dci-bm-dmrx-most-heard.csv', 'wb') as csvo:
write_n0gsg_csv(groups + users, csvo)
|
<commit_before><commit_msg>Add support for N0GSG csv and DMRX MostHeard.csv<commit_after>#!/usr/bin/env python2
import csv
# dmrx.net uses SSL SNI, which urllib2 doesn't support
import requests
from dmr_marc_users_cs750 import get_groups
MOST_HEARD_URL = 'https://dmrx.net/csvfiles/MostHeard.csv'
# Neither of these formats uses a header row
COLUMNS_N0GSG = ('Call ID', 'Call Alias', 'Call Type', 'Receive Tone')
COLUMNS_DMRX = ('id', 'callsign', 'name')
def read_most_heard_csv(users):
"""Reads DMRX csv from the heard file-like object and returns a list of
dicts in N0GSG export format."""
csvr = csv.DictReader(users, fieldnames=COLUMNS_DMRX)
result = []
for row in csvr:
result.append(dict(zip(COLUMNS_N0GSG, (
row['id'],
' '.join((row['callsign'], row['name'])),
'Private Call', # Call Type
'No', # Receive Tone
))))
return result
def write_n0gsg_csv(contacts, csvo,
fieldnames=COLUMNS_N0GSG, writeheader=False):
"""Writes contacts to the csvo file-like object.
"""
csvw = csv.DictWriter(csvo, fieldnames)
if writeheader:
csvw.writeheader()
for row in contacts:
csvw.writerow(row)
def get_users(db_url=MOST_HEARD_URL):
source = requests.get(db_url, stream=True)
users = read_most_heard_csv(source.raw)
source.close()
return users
if __name__ == '__main__':
users = get_users()
groups = get_groups()
with open('n0gsg-dci-bm-dmrx-most-heard.csv', 'wb') as csvo:
write_n0gsg_csv(groups + users, csvo)
|
|
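An illustrative round-trip through the two helpers above, using made-up radio IDs and callsigns (the sample rows are not real DMR records).

# Python 2 style, matching the module above.
import io

sample = io.BytesIO(b"3106001,N0CALL,Sample Name\n3106002,K0XYZ,Other Name\n")
contacts = read_most_heard_csv(sample)
# Each row becomes, e.g.:
# {'Call ID': '3106001', 'Call Alias': 'N0CALL Sample Name',
#  'Call Type': 'Private Call', 'Receive Tone': 'No'}

out = io.BytesIO()
write_n0gsg_csv(contacts, out, writeheader=True)
print out.getvalue()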
a684564eace2185b40acf3413c8f75587195ff46
|
unitary/examples/tictactoe/ascii_board.py
|
unitary/examples/tictactoe/ascii_board.py
|
# Copyright 2022 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unitary.examples.tictactoe.enums import TicTacSquare, TicTacResult
from unitary.examples.tictactoe.tic_tac_toe import TicTacToe
def _flip_turn(turn: TicTacSquare):
return TicTacSquare.O if turn == TicTacSquare.X else TicTacSquare.X
class AsciiBoard:
def __init__(self):
self.board = TicTacToe()
def play(self):
turn = TicTacSquare.X
result = TicTacResult.UNFINISHED
while result == TicTacResult.UNFINISHED:
print(self.board.print())
move = input(f"{turn.name} turn to move: ")
result = self.board.move(move, turn)
turn = _flip_turn(turn)
print(f"Result: {result.name}")
if __name__ == "__main__":
AsciiBoard().play()
|
Add ASCII board for Quantum TicTacToe board
|
Add ASCII board for Quantum TicTacToe board
- Add preliminary ASCII board for Quantum TicTacToe
- Displays probability for blank (.) and X and O.
|
Python
|
apache-2.0
|
quantumlib/unitary,quantumlib/unitary
|
Add ASCII board for Quantum TicTacToe board
- Add preliminary ASCII board for Quantum TicTacToe
- Displays probability for blank (.) and X and O.
|
# Copyright 2022 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unitary.examples.tictactoe.enums import TicTacSquare, TicTacResult
from unitary.examples.tictactoe.tic_tac_toe import TicTacToe
def _flip_turn(turn: TicTacSquare):
return TicTacSquare.O if turn == TicTacSquare.X else TicTacSquare.X
class AsciiBoard:
def __init__(self):
self.board = TicTacToe()
def play(self):
turn = TicTacSquare.X
result = TicTacResult.UNFINISHED
while result == TicTacResult.UNFINISHED:
print(self.board.print())
move = input(f"{turn.name} turn to move: ")
result = self.board.move(move, turn)
turn = _flip_turn(turn)
print(f"Result: {result.name}")
if __name__ == "__main__":
AsciiBoard().play()
|
<commit_before><commit_msg>Add ASCII board for Quantum TicTacToe board
- Add preliminary ASCII board for Quantum TicTacToe
- Displays probability for blank (.) and X and O.<commit_after>
|
# Copyright 2022 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unitary.examples.tictactoe.enums import TicTacSquare, TicTacResult
from unitary.examples.tictactoe.tic_tac_toe import TicTacToe
def _flip_turn(turn: TicTacSquare):
return TicTacSquare.O if turn == TicTacSquare.X else TicTacSquare.X
class AsciiBoard:
def __init__(self):
self.board = TicTacToe()
def play(self):
turn = TicTacSquare.X
result = TicTacResult.UNFINISHED
while result == TicTacResult.UNFINISHED:
print(self.board.print())
move = input(f"{turn.name} turn to move: ")
result = self.board.move(move, turn)
turn = _flip_turn(turn)
print(f"Result: {result.name}")
if __name__ == "__main__":
AsciiBoard().play()
|
Add ASCII board for Quantum TicTacToe board
- Add preliminary ASCII board for Quantum TicTacToe
- Displays probability for blank (.) and X and O.# Copyright 2022 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unitary.examples.tictactoe.enums import TicTacSquare, TicTacResult
from unitary.examples.tictactoe.tic_tac_toe import TicTacToe
def _flip_turn(turn: TicTacSquare):
return TicTacSquare.O if turn == TicTacSquare.X else TicTacSquare.X
class AsciiBoard:
def __init__(self):
self.board = TicTacToe()
def play(self):
turn = TicTacSquare.X
result = TicTacResult.UNFINISHED
while result == TicTacResult.UNFINISHED:
print(self.board.print())
move = input(f"{turn.name} turn to move: ")
result = self.board.move(move, turn)
turn = _flip_turn(turn)
print(f"Result: {result.name}")
if __name__ == "__main__":
AsciiBoard().play()
|
<commit_before><commit_msg>Add ASCII board for Quantum TicTacToe board
- Add preliminary ASCII board for Quantum TicTacToe
- Displays probability for blank (.) and X and O.<commit_after># Copyright 2022 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unitary.examples.tictactoe.enums import TicTacSquare, TicTacResult
from unitary.examples.tictactoe.tic_tac_toe import TicTacToe
def _flip_turn(turn: TicTacSquare):
return TicTacSquare.O if turn == TicTacSquare.X else TicTacSquare.X
class AsciiBoard:
def __init__(self):
self.board = TicTacToe()
def play(self):
turn = TicTacSquare.X
result = TicTacResult.UNFINISHED
while result == TicTacResult.UNFINISHED:
print(self.board.print())
move = input(f"{turn.name} turn to move: ")
result = self.board.move(move, turn)
turn = _flip_turn(turn)
print(f"Result: {result.name}")
if __name__ == "__main__":
AsciiBoard().play()
|
|
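A tiny sanity check of the helper above (illustrative only); the game-loop contract is that TicTacToe.move() returns a TicTacResult, and anything other than UNFINISHED ends play().

# _flip_turn simply alternates the two players.
assert _flip_turn(TicTacSquare.X) == TicTacSquare.O
assert _flip_turn(TicTacSquare.O) == TicTacSquare.X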
a176d05ec1481ab3892b4d4768e6bad1fdbd4868
|
tools/update_whatsnew.py
|
tools/update_whatsnew.py
|
"""Update the What's New doc (development version)
This collects the snippets from whatsnew/pr/, moves their content into
whatsnew/development.rst (chronologically ordered), and deletes the snippets.
"""
import io
import os
from os.path import dirname, basename, abspath, join as pjoin
from subprocess import check_call, check_output
repo_root = dirname(dirname(abspath(__file__)))
whatsnew_dir = pjoin(repo_root, 'docs', 'source', 'whatsnew')
pr_dir = pjoin(whatsnew_dir, 'pr')
target = pjoin(whatsnew_dir, 'development.rst')
FEATURE_MARK = ".. DO NOT EDIT THIS LINE BEFORE RELEASE. FEATURE INSERTION POINT."
INCOMPAT_MARK = ".. DO NOT EDIT THIS LINE BEFORE RELEASE. INCOMPAT INSERTION POINT."
# 1. Collect the whatsnew snippet files ---------------------------------------
files = set(os.listdir(pr_dir))
# Ignore explanatory and example files
files.difference_update({'README.md',
'incompat-switching-to-perl.rst',
'antigravity-feature.rst'}
)
# Absolute paths
files = {pjoin(pr_dir, f) for f in files}
def getmtime(f):
return check_output(['git', 'log', '-1', '--format="%ai"', '--', f])
files = sorted(files, key=getmtime)
features, incompats = [], []
for path in files:
with io.open(path, encoding='utf-8') as f:
content = f.read().rstrip()
if basename(path).startswith('incompat-'):
incompats.append(content)
else:
features.append(content)
# Put the insertion markers back on the end, so they're ready for next time.
feature_block = '\n\n'.join(features + [FEATURE_MARK])
incompat_block = '\n\n'.join(incompats + [INCOMPAT_MARK])
# 2. Update the target file ---------------------------------------------------
with io.open(target, encoding='utf-8') as f:
content = f.read()
assert content.count(FEATURE_MARK) == 1
assert content.count(INCOMPAT_MARK) == 1
content = content.replace(FEATURE_MARK, feature_block)
content = content.replace(INCOMPAT_MARK, incompat_block)
# Clean trailing whitespace
content = '\n'.join(l.rstrip() for l in content.splitlines())
with io.open(target, 'w', encoding='utf-8') as f:
f.write(content)
# 3. Stage the changes in git -------------------------------------------------
for file in files:
check_call(['git', 'rm', file])
check_call(['git', 'add', target])
print("Merged what's new changes. Check the diff and commit the change.")
|
Add script to update whatsnew file from PR snippets
|
Add script to update whatsnew file from PR snippets
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add script to update whatsnew file from PR snippets
|
"""Update the What's New doc (development version)
This collects the snippets from whatsnew/pr/, moves their content into
whatsnew/development.rst (chronologically ordered), and deletes the snippets.
"""
import io
import os
from os.path import dirname, basename, abspath, join as pjoin
from subprocess import check_call, check_output
repo_root = dirname(dirname(abspath(__file__)))
whatsnew_dir = pjoin(repo_root, 'docs', 'source', 'whatsnew')
pr_dir = pjoin(whatsnew_dir, 'pr')
target = pjoin(whatsnew_dir, 'development.rst')
FEATURE_MARK = ".. DO NOT EDIT THIS LINE BEFORE RELEASE. FEATURE INSERTION POINT."
INCOMPAT_MARK = ".. DO NOT EDIT THIS LINE BEFORE RELEASE. INCOMPAT INSERTION POINT."
# 1. Collect the whatsnew snippet files ---------------------------------------
files = set(os.listdir(pr_dir))
# Ignore explanatory and example files
files.difference_update({'README.md',
'incompat-switching-to-perl.rst',
'antigravity-feature.rst'}
)
# Absolute paths
files = {pjoin(pr_dir, f) for f in files}
def getmtime(f):
return check_output(['git', 'log', '-1', '--format="%ai"', '--', f])
files = sorted(files, key=getmtime)
features, incompats = [], []
for path in files:
with io.open(path, encoding='utf-8') as f:
content = f.read().rstrip()
if basename(path).startswith('incompat-'):
incompats.append(content)
else:
features.append(content)
# Put the insertion markers back on the end, so they're ready for next time.
feature_block = '\n\n'.join(features + [FEATURE_MARK])
incompat_block = '\n\n'.join(incompats + [INCOMPAT_MARK])
# 2. Update the target file ---------------------------------------------------
with io.open(target, encoding='utf-8') as f:
content = f.read()
assert content.count(FEATURE_MARK) == 1
assert content.count(INCOMPAT_MARK) == 1
content = content.replace(FEATURE_MARK, feature_block)
content = content.replace(INCOMPAT_MARK, incompat_block)
# Clean trailing whitespace
content = '\n'.join(l.rstrip() for l in content.splitlines())
with io.open(target, 'w', encoding='utf-8') as f:
f.write(content)
# 3. Stage the changes in git -------------------------------------------------
for file in files:
check_call(['git', 'rm', file])
check_call(['git', 'add', target])
print("Merged what's new changes. Check the diff and commit the change.")
|
<commit_before><commit_msg>Add script to update whatsnew file from PR snippets<commit_after>
|
"""Update the What's New doc (development version)
This collects the snippets from whatsnew/pr/, moves their content into
whatsnew/development.rst (chronologically ordered), and deletes the snippets.
"""
import io
import os
from os.path import dirname, basename, abspath, join as pjoin
from subprocess import check_call, check_output
repo_root = dirname(dirname(abspath(__file__)))
whatsnew_dir = pjoin(repo_root, 'docs', 'source', 'whatsnew')
pr_dir = pjoin(whatsnew_dir, 'pr')
target = pjoin(whatsnew_dir, 'development.rst')
FEATURE_MARK = ".. DO NOT EDIT THIS LINE BEFORE RELEASE. FEATURE INSERTION POINT."
INCOMPAT_MARK = ".. DO NOT EDIT THIS LINE BEFORE RELEASE. INCOMPAT INSERTION POINT."
# 1. Collect the whatsnew snippet files ---------------------------------------
files = set(os.listdir(pr_dir))
# Ignore explanatory and example files
files.difference_update({'README.md',
'incompat-switching-to-perl.rst',
'antigravity-feature.rst'}
)
# Absolute paths
files = {pjoin(pr_dir, f) for f in files}
def getmtime(f):
return check_output(['git', 'log', '-1', '--format="%ai"', '--', f])
files = sorted(files, key=getmtime)
features, incompats = [], []
for path in files:
with io.open(path, encoding='utf-8') as f:
content = f.read().rstrip()
if basename(path).startswith('incompat-'):
incompats.append(content)
else:
features.append(content)
# Put the insertion markers back on the end, so they're ready for next time.
feature_block = '\n\n'.join(features + [FEATURE_MARK])
incompat_block = '\n\n'.join(incompats + [INCOMPAT_MARK])
# 2. Update the target file ---------------------------------------------------
with io.open(target, encoding='utf-8') as f:
content = f.read()
assert content.count(FEATURE_MARK) == 1
assert content.count(INCOMPAT_MARK) == 1
content = content.replace(FEATURE_MARK, feature_block)
content = content.replace(INCOMPAT_MARK, incompat_block)
# Clean trailing whitespace
content = '\n'.join(l.rstrip() for l in content.splitlines())
with io.open(target, 'w', encoding='utf-8') as f:
f.write(content)
# 3. Stage the changes in git -------------------------------------------------
for file in files:
check_call(['git', 'rm', file])
check_call(['git', 'add', target])
print("Merged what's new changes. Check the diff and commit the change.")
|
Add script to update whatsnew file from PR snippets"""Update the What's New doc (development version)
This collects the snippets from whatsnew/pr/, moves their content into
whatsnew/development.rst (chronologically ordered), and deletes the snippets.
"""
import io
import os
from os.path import dirname, basename, abspath, join as pjoin
from subprocess import check_call, check_output
repo_root = dirname(dirname(abspath(__file__)))
whatsnew_dir = pjoin(repo_root, 'docs', 'source', 'whatsnew')
pr_dir = pjoin(whatsnew_dir, 'pr')
target = pjoin(whatsnew_dir, 'development.rst')
FEATURE_MARK = ".. DO NOT EDIT THIS LINE BEFORE RELEASE. FEATURE INSERTION POINT."
INCOMPAT_MARK = ".. DO NOT EDIT THIS LINE BEFORE RELEASE. INCOMPAT INSERTION POINT."
# 1. Collect the whatsnew snippet files ---------------------------------------
files = set(os.listdir(pr_dir))
# Ignore explanatory and example files
files.difference_update({'README.md',
'incompat-switching-to-perl.rst',
'antigravity-feature.rst'}
)
# Absolute paths
files = {pjoin(pr_dir, f) for f in files}
def getmtime(f):
return check_output(['git', 'log', '-1', '--format="%ai"', '--', f])
files = sorted(files, key=getmtime)
features, incompats = [], []
for path in files:
with io.open(path, encoding='utf-8') as f:
content = f.read().rstrip()
if basename(path).startswith('incompat-'):
incompats.append(content)
else:
features.append(content)
# Put the insertion markers back on the end, so they're ready for next time.
feature_block = '\n\n'.join(features + [FEATURE_MARK])
incompat_block = '\n\n'.join(incompats + [INCOMPAT_MARK])
# 2. Update the target file ---------------------------------------------------
with io.open(target, encoding='utf-8') as f:
content = f.read()
assert content.count(FEATURE_MARK) == 1
assert content.count(INCOMPAT_MARK) == 1
content = content.replace(FEATURE_MARK, feature_block)
content = content.replace(INCOMPAT_MARK, incompat_block)
# Clean trailing whitespace
content = '\n'.join(l.rstrip() for l in content.splitlines())
with io.open(target, 'w', encoding='utf-8') as f:
f.write(content)
# 3. Stage the changes in git -------------------------------------------------
for file in files:
check_call(['git', 'rm', file])
check_call(['git', 'add', target])
print("Merged what's new changes. Check the diff and commit the change.")
|
<commit_before><commit_msg>Add script to update whatsnew file from PR snippets<commit_after>"""Update the What's New doc (development version)
This collects the snippets from whatsnew/pr/, moves their content into
whatsnew/development.rst (chronologically ordered), and deletes the snippets.
"""
import io
import os
from os.path import dirname, basename, abspath, join as pjoin
from subprocess import check_call, check_output
repo_root = dirname(dirname(abspath(__file__)))
whatsnew_dir = pjoin(repo_root, 'docs', 'source', 'whatsnew')
pr_dir = pjoin(whatsnew_dir, 'pr')
target = pjoin(whatsnew_dir, 'development.rst')
FEATURE_MARK = ".. DO NOT EDIT THIS LINE BEFORE RELEASE. FEATURE INSERTION POINT."
INCOMPAT_MARK = ".. DO NOT EDIT THIS LINE BEFORE RELEASE. INCOMPAT INSERTION POINT."
# 1. Collect the whatsnew snippet files ---------------------------------------
files = set(os.listdir(pr_dir))
# Ignore explanatory and example files
files.difference_update({'README.md',
'incompat-switching-to-perl.rst',
'antigravity-feature.rst'}
)
# Absolute paths
files = {pjoin(pr_dir, f) for f in files}
def getmtime(f):
return check_output(['git', 'log', '-1', '--format="%ai"', '--', f])
files = sorted(files, key=getmtime)
features, incompats = [], []
for path in files:
with io.open(path, encoding='utf-8') as f:
content = f.read().rstrip()
if basename(path).startswith('incompat-'):
incompats.append(content)
else:
features.append(content)
# Put the insertion markers back on the end, so they're ready for next time.
feature_block = '\n\n'.join(features + [FEATURE_MARK])
incompat_block = '\n\n'.join(incompats + [INCOMPAT_MARK])
# 2. Update the target file ---------------------------------------------------
with io.open(target, encoding='utf-8') as f:
content = f.read()
assert content.count(FEATURE_MARK) == 1
assert content.count(INCOMPAT_MARK) == 1
content = content.replace(FEATURE_MARK, feature_block)
content = content.replace(INCOMPAT_MARK, incompat_block)
# Clean trailing whitespace
content = '\n'.join(l.rstrip() for l in content.splitlines())
with io.open(target, 'w', encoding='utf-8') as f:
f.write(content)
# 3. Stage the changes in git -------------------------------------------------
for file in files:
check_call(['git', 'rm', file])
check_call(['git', 'add', target])
print("Merged what's new changes. Check the diff and commit the change.")
|
|
ad301e03c4c1109b7caf3371fe64f638dfc1349b
|
NeedForCryptography/Tools/discretelog.py
|
NeedForCryptography/Tools/discretelog.py
|
def log(base, value, modulo):
for power in range(0, modulo + 1):
if value == base ** power % modulo:
return power
raise ValueError('log{}({}) does not have discrete logarithm in {}'
.format(base, value, modulo))
|
Add brute-force algorithm for discrete logarithm.
|
Add brute-force algorithm for discrete logarithm.
|
Python
|
mit
|
Veselin-Genadiev/NeedForCryptography
|
Add brute-force algorithm for discrete logarithm.
|
def log(base, value, modulo):
for power in range(0, modulo + 1):
if value == base ** power % modulo:
return power
raise ValueError('log{}({}) does not have discrete logarithm in {}'
.format(base, value, modulo))
|
<commit_before><commit_msg>Add brute-force algorithm for discrete logarithm.<commit_after>
|
def log(base, value, modulo):
for power in range(0, modulo + 1):
if value == base ** power % modulo:
return power
raise ValueError('log{}({}) does not have discrete logarithm in {}'
.format(base, value, modulo))
|
Add brute-force algorithm for discrete logarithm.def log(base, value, modulo):
for power in range(0, modulo + 1):
if value == base ** power % modulo:
return power
raise ValueError('log{}({}) does not have discrete logarithm in {}'
.format(base, value, modulo))
|
<commit_before><commit_msg>Add brute-force algorithm for discrete logarithm.<commit_after>def log(base, value, modulo):
for power in range(0, modulo + 1):
if value == base ** power % modulo:
return power
raise ValueError('log{}({}) does not have discrete logarithm in {}'
.format(base, value, modulo))
|
|
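A worked example for the brute-force search above; the numbers are small enough to check by hand: 3**4 = 81 = 4*17 + 13, so 81 % 17 == 13 and the smallest exponent found is 4.

assert log(3, 13, 17) == 4
# Cross-check with Python's built-in modular exponentiation.
assert pow(3, 4, 17) == 13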
7b1dfeab79040e03aa7a9e247028d5da4e37cf4f
|
examples/test_cycle_elements.py
|
examples/test_cycle_elements.py
|
from seleniumbase import BaseCase
class CycleTests(BaseCase):
def test_cycle_elements_with_tab_and_press_enter(self):
""" Test pressing the tab key to cycle through elements.
Then click on the active element and verify actions.
This can all be performed by using a single command.
The "\t" is the tab key. The "\n" is the RETURN key. """
self.open("seleniumbase.io/demo_page")
self.assert_text("This Text is Green", "#pText")
self.send_keys("html", "\t\t\t\t\n")
self.assert_text("This Text is Purple", "#pText")
|
Add test that tabs through and clicks the active element
|
Add test that tabs through and clicks the active element
|
Python
|
mit
|
seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase
|
Add test that tabs through and clicks the active element
|
from seleniumbase import BaseCase
class CycleTests(BaseCase):
def test_cycle_elements_with_tab_and_press_enter(self):
""" Test pressing the tab key to cycle through elements.
Then click on the active element and verify actions.
This can all be performed by using a single command.
The "\t" is the tab key. The "\n" is the RETURN key. """
self.open("seleniumbase.io/demo_page")
self.assert_text("This Text is Green", "#pText")
self.send_keys("html", "\t\t\t\t\n")
self.assert_text("This Text is Purple", "#pText")
|
<commit_before><commit_msg>Add test that tabs through and clicks the active element<commit_after>
|
from seleniumbase import BaseCase
class CycleTests(BaseCase):
def test_cycle_elements_with_tab_and_press_enter(self):
""" Test pressing the tab key to cycle through elements.
Then click on the active element and verify actions.
This can all be performed by using a single command.
The "\t" is the tab key. The "\n" is the RETURN key. """
self.open("seleniumbase.io/demo_page")
self.assert_text("This Text is Green", "#pText")
self.send_keys("html", "\t\t\t\t\n")
self.assert_text("This Text is Purple", "#pText")
|
Add test that tabs through and clicks the active elementfrom seleniumbase import BaseCase
class CycleTests(BaseCase):
def test_cycle_elements_with_tab_and_press_enter(self):
""" Test pressing the tab key to cycle through elements.
Then click on the active element and verify actions.
This can all be performed by using a single command.
The "\t" is the tab key. The "\n" is the RETURN key. """
self.open("seleniumbase.io/demo_page")
self.assert_text("This Text is Green", "#pText")
self.send_keys("html", "\t\t\t\t\n")
self.assert_text("This Text is Purple", "#pText")
|
<commit_before><commit_msg>Add test that tabs through and clicks the active element<commit_after>from seleniumbase import BaseCase
class CycleTests(BaseCase):
def test_cycle_elements_with_tab_and_press_enter(self):
""" Test pressing the tab key to cycle through elements.
Then click on the active element and verify actions.
This can all be performed by using a single command.
The "\t" is the tab key. The "\n" is the RETURN key. """
self.open("seleniumbase.io/demo_page")
self.assert_text("This Text is Green", "#pText")
self.send_keys("html", "\t\t\t\t\n")
self.assert_text("This Text is Purple", "#pText")
|
|
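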
f3a4a21b7e8c4b9d2c1983aec77c91f5193146a2
|
exp/recommendexp/SparsityExp.py
|
exp/recommendexp/SparsityExp.py
|
#Test if we can easily get the SVD of a set of matrices with low rank but under
#a fixed structure
import numpy
import scipy.sparse
from exp.util.SparseUtils import SparseUtils
numpy.set_printoptions(suppress=True, precision=3, linewidth=150)
shape = (15, 20)
r = 10
k = 50
X, U, s, V = SparseUtils.generateSparseLowRank(shape, r, k, verbose=True)
X = numpy.array(X.todense())
Y = numpy.zeros(X.shape)
Y[X.nonzero()] = 1
print(Y)
U2, s2, V2 = numpy.linalg.svd(Y)
print(s2)
X2 = numpy.zeros(X.shape)
for i in range(r):
X2 += s[i]*numpy.diag(U[:,i]).dot(Y).dot(numpy.diag(V[:, i]))
|
Test SVD of sparse matrix
|
Test SVD of sparse matrix
|
Python
|
bsd-3-clause
|
charanpald/APGL
|
Test SVD of sparse matrix
|
#Test if we can easily get the SVD of a set of matrices with low rank but under
#a fixed structure
import numpy
import scipy.sparse
from exp.util.SparseUtils import SparseUtils
numpy.set_printoptions(suppress=True, precision=3, linewidth=150)
shape = (15, 20)
r = 10
k = 50
X, U, s, V = SparseUtils.generateSparseLowRank(shape, r, k, verbose=True)
X = numpy.array(X.todense())
Y = numpy.zeros(X.shape)
Y[X.nonzero()] = 1
print(Y)
U2, s2, V2 = numpy.linalg.svd(Y)
print(s2)
X2 = numpy.zeros(X.shape)
for i in range(r):
X2 += s[i]*numpy.diag(U[:,i]).dot(Y).dot(numpy.diag(V[:, i]))
|
<commit_before><commit_msg>Test SVD of sparse matrix <commit_after>
|
#Test if we can easily get the SVD of a set of matrices with low rank but under
#a fixed structure
import numpy
import scipy.sparse
from exp.util.SparseUtils import SparseUtils
numpy.set_printoptions(suppress=True, precision=3, linewidth=150)
shape = (15, 20)
r = 10
k = 50
X, U, s, V = SparseUtils.generateSparseLowRank(shape, r, k, verbose=True)
X = numpy.array(X.todense())
Y = numpy.zeros(X.shape)
Y[X.nonzero()] = 1
print(Y)
U2, s2, V2 = numpy.linalg.svd(Y)
print(s2)
X2 = numpy.zeros(X.shape)
for i in range(r):
X2 += s[i]*numpy.diag(U[:,i]).dot(Y).dot(numpy.diag(V[:, i]))
|
Test SVD of sparse matrix
#Test if we can easily get the SVD of a set of matrices with low rank but under
#a fixed structure
import numpy
import scipy.sparse
from exp.util.SparseUtils import SparseUtils
numpy.set_printoptions(suppress=True, precision=3, linewidth=150)
shape = (15, 20)
r = 10
k = 50
X, U, s, V = SparseUtils.generateSparseLowRank(shape, r, k, verbose=True)
X = numpy.array(X.todense())
Y = numpy.zeros(X.shape)
Y[X.nonzero()] = 1
print(Y)
U2, s2, V2 = numpy.linalg.svd(Y)
print(s2)
X2 = numpy.zeros(X.shape)
for i in range(r):
X2 += s[i]*numpy.diag(U[:,i]).dot(Y).dot(numpy.diag(V[:, i]))
|
<commit_before><commit_msg>Test SVD of sparse matrix <commit_after>
#Test if we can easily get the SVD of a set of matrices with low rank but under
#a fixed structure
import numpy
import scipy.sparse
from exp.util.SparseUtils import SparseUtils
numpy.set_printoptions(suppress=True, precision=3, linewidth=150)
shape = (15, 20)
r = 10
k = 50
X, U, s, V = SparseUtils.generateSparseLowRank(shape, r, k, verbose=True)
X = numpy.array(X.todense())
Y = numpy.zeros(X.shape)
Y[X.nonzero()] = 1
print(Y)
U2, s2, V2 = numpy.linalg.svd(Y)
print(s2)
X2 = numpy.zeros(X.shape)
for i in range(r):
X2 += s[i]*numpy.diag(U[:,i]).dot(Y).dot(numpy.diag(V[:, i]))
|
|
76aa34e0bad66e8bf0e4864b4654acbc57101441
|
elpiwear/ads1015_demo.py
|
elpiwear/ads1015_demo.py
|
#Demo code for the Adafruit ADS1015 board using the Raspberry Pi
#The driver can also be used with the Ready pin using a GPIO to
#be used in an interrupt mode.
import Rpi.spi as SPI
import ads1015
DEVICE_ADDRESS = 0x48
SPI_PORT = 1
spi = SPI.spi(SPI_PORT,DEVICE_ADDRESS)
adc = ads1015.ads1015(spi)
adc.setchannel(0, True)
print "ADC value:" + hex(adc.getvalue())
|
Add a simple demo code to use the ADS1015 driver
|
Add a simple demo code to use the ADS1015 driver
|
Python
|
mit
|
fjacob21/pycon2015
|
Add a simple demo code to use the ADS1015 driver
|
#Demo code for the Adafruit ADS1015 board using the Raspberry Pi
#The driver can also be used with the Ready pin using a GPIO to
#be used in an interrupt mode.
import Rpi.spi as SPI
import ads1015
DEVICE_ADDRESS = 0x48
SPI_PORT = 1
spi = SPI.spi(SPI_PORT,DEVICE_ADDRESS)
adc = ads1015.ads1015(spi)
adc.setchannel(0, True)
print "ADC value:" + hex(adc.getvalue())
|
<commit_before><commit_msg>Add a simple demo code to use the ADS1015 driver<commit_after>
|
#Demo code for the Adafruit ADS1015 board using the Raspberry Pi
#The driver can also be used with the Ready pin using a GPIO to
#be used in an interrupt mode.
import Rpi.spi as SPI
import ads1015
DEVICE_ADDRESS = 0x48
SPI_PORT = 1
spi = SPI.spi(SPI_PORT,DEVICE_ADDRESS)
adc = ads1015.ads1015(spi)
adc.setchannel(0, True)
print "ADC value:" + hex(adc.getvalue())
|
Add a simple demo code to use the ADS1015 driver#Demo code for the Adafruit ADS1015 board using the Raspberry Pi
#The driver can also be used with the Ready pin using a GPIO to
#be used in an interrupt mode.
import Rpi.spi as SPI
import ads1015
DEVICE_ADDRESS = 0x48
SPI_PORT = 1
spi = SPI.spi(SPI_PORT,DEVICE_ADDRESS)
adc = ads1015.ads1015(spi)
adc.setchannel(0, True)
print "ADC value:" + hex(adc.getvalue())
|
<commit_before><commit_msg>Add a simple demo code to use the ADS1015 driver<commit_after>#Demo code for the Adafruit ADS1015 board using the Raspberry Pi
#The driver can also be used with the Ready pin using a GPIO to
#be used in an interrupt mode.
import Rpi.spi as SPI
import ads1015
DEVICE_ADDRESS = 0x48
SPI_PORT = 1
spi = SPI.spi(SPI_PORT,DEVICE_ADDRESS)
adc = ads1015.ads1015(spi)
adc.setchannel(0, True)
print "ADC value:" + hex(adc.getvalue())
|
|
5f1d0836ca81b5d043de09c9dd6d76d797a3dcd6
|
dev/templates/python/python_unittest.py
|
dev/templates/python/python_unittest.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
${TM_NEW_FILE_BASENAME}.py
Created by ${TM_FULLNAME} on ${TM_DATE}.
Copyright (c) ${TM_YEAR} ${TM_ORGANIZATION_NAME}. All rights reserved.
"""
import unittest
class ${TM_NEW_FILE_BASENAME}(unittest.TestCase):
def setUp(self):
pass
if __name__ == '__main__':
unittest.main()
|
Add python unittest template from TextMate
|
Add python unittest template from TextMate
|
Python
|
isc
|
rcuza/init,rcuza/init
|
Add python unittest template from TextMate
|
#!/usr/bin/env python
# encoding: utf-8
"""
${TM_NEW_FILE_BASENAME}.py
Created by ${TM_FULLNAME} on ${TM_DATE}.
Copyright (c) ${TM_YEAR} ${TM_ORGANIZATION_NAME}. All rights reserved.
"""
import unittest
class ${TM_NEW_FILE_BASENAME}(unittest.TestCase):
def setUp(self):
pass
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add python unittest template from TextMate<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
"""
${TM_NEW_FILE_BASENAME}.py
Created by ${TM_FULLNAME} on ${TM_DATE}.
Copyright (c) ${TM_YEAR} ${TM_ORGANIZATION_NAME}. All rights reserved.
"""
import unittest
class ${TM_NEW_FILE_BASENAME}(unittest.TestCase):
def setUp(self):
pass
if __name__ == '__main__':
unittest.main()
|
Add python unittest template from TextMate#!/usr/bin/env python
# encoding: utf-8
"""
${TM_NEW_FILE_BASENAME}.py
Created by ${TM_FULLNAME} on ${TM_DATE}.
Copyright (c) ${TM_YEAR} ${TM_ORGANIZATION_NAME}. All rights reserved.
"""
import unittest
class ${TM_NEW_FILE_BASENAME}(unittest.TestCase):
def setUp(self):
pass
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add python unittest template from TextMate<commit_after>#!/usr/bin/env python
# encoding: utf-8
"""
${TM_NEW_FILE_BASENAME}.py
Created by ${TM_FULLNAME} on ${TM_DATE}.
Copyright (c) ${TM_YEAR} ${TM_ORGANIZATION_NAME}. All rights reserved.
"""
import unittest
class ${TM_NEW_FILE_BASENAME}(unittest.TestCase):
def setUp(self):
pass
if __name__ == '__main__':
unittest.main()
|
|
0fb1c8ff1a7bba91d27303b7abfc37f460c97c86
|
scripts/print_schema.py
|
scripts/print_schema.py
|
#!/usr/bin/env python3
import argparse
from sqlalchemy import MetaData
from sqlalchemy_schemadisplay import create_schema_graph
def plot_schema(fn):
graph = create_schema_graph(
metadata=MetaData('sqlite:///'+fn),
show_datatypes=False,
show_indexes=False,
rankdir='LR',
concentrate=False
)
graph.write_png('dbschema.png')
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='Schema plotter')
parser.add_argument('db', help='DB to plot')
args = parser.parse_args()
plot_schema(args.db)
|
Add script for printing the DB schema
|
Add script for printing the DB schema
|
Python
|
bsd-3-clause
|
chrisburr/lhcb-talky,chrisburr/lhcb-talky
|
Add script for printing the DB schema
|
#!/usr/bin/env python3
import argparse
from sqlalchemy import MetaData
from sqlalchemy_schemadisplay import create_schema_graph
def plot_schema(fn):
graph = create_schema_graph(
metadata=MetaData('sqlite:///'+fn),
show_datatypes=False,
show_indexes=False,
rankdir='LR',
concentrate=False
)
graph.write_png('dbschema.png')
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='Schema plotter')
parser.add_argument('db', help='DB to plot')
args = parser.parse_args()
plot_schema(args.db)
|
<commit_before><commit_msg>Add script for printing the DB schema<commit_after>
|
#!/usr/bin/env python3
import argparse
from sqlalchemy import MetaData
from sqlalchemy_schemadisplay import create_schema_graph
def plot_schema(fn):
graph = create_schema_graph(
metadata=MetaData('sqlite:///'+fn),
show_datatypes=False,
show_indexes=False,
rankdir='LR',
concentrate=False
)
graph.write_png('dbschema.png')
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='Schema plotter')
parser.add_argument('db', help='DB to plot')
args = parser.parse_args()
plot_schema(args.db)
|
Add script for printing the DB schema#!/usr/bin/env python3
import argparse
from sqlalchemy import MetaData
from sqlalchemy_schemadisplay import create_schema_graph
def plot_schema(fn):
graph = create_schema_graph(
metadata=MetaData('sqlite:///'+fn),
show_datatypes=False,
show_indexes=False,
rankdir='LR',
concentrate=False
)
graph.write_png('dbschema.png')
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='Schema plotter')
parser.add_argument('db', help='DB to plot')
args = parser.parse_args()
plot_schema(args.db)
|
<commit_before><commit_msg>Add script for printing the DB schema<commit_after>#!/usr/bin/env python3
import argparse
from sqlalchemy import MetaData
from sqlalchemy_schemadisplay import create_schema_graph
def plot_schema(fn):
graph = create_schema_graph(
metadata=MetaData('sqlite:///'+fn),
show_datatypes=False,
show_indexes=False,
rankdir='LR',
concentrate=False
)
graph.write_png('dbschema.png')
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='Schema plotter')
parser.add_argument('db', help='DB to plot')
args = parser.parse_args()
plot_schema(args.db)
|
|
5c277a958f2ce8e97ff539870d13a28181c8ea65
|
task_01.py
|
task_01.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a module gets today's date."""
import datetime
CURDATE = None
def get_current_date():
"""Return today's date
Args:
(int): Current day
Returns:
date: the year, month, day
Examples:
>>> import task_01
>>> print task_01.CURDATE
None
>>> print task_01.get_current_date()
datetime.date(2015, 9, 24)
"""
return datetime.date.today()
if __name__ == '__main__':
CURDATE = get_current_date()
print CURDATE
|
Use datetime.date.today() and then if __name__ == '__main__': to make CURDATE available.
|
Use datetime.date.today() and then if __name__ == '__main__': to make CURDATE available.
|
Python
|
mpl-2.0
|
gracehyemin/is210-week-05-synthesizing,gracehyemin/is210-week-05-synthesizing
|
Use datetime.date.today() and then if __name__ == '__main__': to make CURDATE available.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a module gets today's date."""
import datetime
CURDATE = None
def get_current_date():
"""Return today's date
Args:
(int): Current day
Returns:
date: the year, month, day
Examples:
>>> import task_01
>>> print task_01.CURDATE
None
>>> print task_01.get_current_date()
datetime.date(2015, 9, 24)
"""
return datetime.date.today()
if __name__ == '__main__':
CURDATE = get_current_date()
print CURDATE
|
<commit_before><commit_msg>Use datetime.date.today() and then if __name__ == '__main__': to make CURDATE available.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a module gets today's date."""
import datetime
CURDATE = None
def get_current_date():
"""Return today's date
Args:
(int): Current day
Returns:
date: the year, month, day
Examples:
>>> import task_01
>>> print task_01.CURDATE
None
>>> print task_01.get_current_date()
datetime.date(2015, 9, 24)
"""
return datetime.date.today()
if __name__ == '__main__':
CURDATE = get_current_date()
print CURDATE
|
Use datetime.date.today() and then if __name__ == '__main__': to make CURDATE available.#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a module gets today's date."""
import datetime
CURDATE = None
def get_current_date():
"""Return today's date
Args:
(int): Current day
Returns:
date: the year, month, day
Examples:
>>> import task_01
>>> print task_01.CURDATE
None
>>> print task_01.get_current_date()
datetime.date(2015, 9, 24)
"""
return datetime.date.today()
if __name__ == '__main__':
CURDATE = get_current_date()
print CURDATE
|
<commit_before><commit_msg>Use datetime.date.today() and then if __name__ == '__main__': to make CURDATE available.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This is a module gets today's date."""
import datetime
CURDATE = None
def get_current_date():
"""Return today's date
Args:
(int): Current day
Returns:
date: the year, month, day
Examples:
>>> import task_01
>>> print task_01.CURDATE
None
>>> print task_01.get_current_date()
datetime.date(2015, 9, 24)
"""
return datetime.date.today()
if __name__ == '__main__':
CURDATE = get_current_date()
print CURDATE
|
|
edba490c7274bf1afb3f13dbb9f82351aab03495
|
migrations/versions/0125_remove_unique_constraint.py
|
migrations/versions/0125_remove_unique_constraint.py
|
"""
Revision ID: 0125_remove_unique_constraint
Revises: 0124_add_free_sms_fragment_limit
Create Date: 2017-10-17 16:47:37.826333
"""
from alembic import op
import sqlalchemy as sa
revision = '0125_remove_unique_constraint'
down_revision = '0124_add_free_sms_fragment_limit'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_service_sms_senders_service_id', table_name='service_sms_senders')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_service_sms_senders_service_id'), table_name='service_sms_senders')
op.create_index('ix_service_sms_senders_service_id', 'service_sms_senders', ['service_id'], unique=True)
# ### end Alembic commands ###
|
Remove unique constraint for ServiceSmsSenders. This will allow a service to have multiple sms senders.
|
Remove unique constraint for ServiceSmsSenders.
This will allow a service to have multiple sms senders.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Remove unique constraint for ServiceSmsSenders.
This will allow a service to have multiple sms senders.
|
"""
Revision ID: 0125_remove_unique_constraint
Revises: 0124_add_free_sms_fragment_limit
Create Date: 2017-10-17 16:47:37.826333
"""
from alembic import op
import sqlalchemy as sa
revision = '0125_remove_unique_constraint'
down_revision = '0124_add_free_sms_fragment_limit'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_service_sms_senders_service_id', table_name='service_sms_senders')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_service_sms_senders_service_id'), table_name='service_sms_senders')
op.create_index('ix_service_sms_senders_service_id', 'service_sms_senders', ['service_id'], unique=True)
# ### end Alembic commands ###
|
<commit_before><commit_msg>Remove unique constraint for ServiceSmsSenders.
This will allow a service to have multiple sms senders.<commit_after>
|
"""
Revision ID: 0125_remove_unique_constraint
Revises: 0124_add_free_sms_fragment_limit
Create Date: 2017-10-17 16:47:37.826333
"""
from alembic import op
import sqlalchemy as sa
revision = '0125_remove_unique_constraint'
down_revision = '0124_add_free_sms_fragment_limit'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_service_sms_senders_service_id', table_name='service_sms_senders')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_service_sms_senders_service_id'), table_name='service_sms_senders')
op.create_index('ix_service_sms_senders_service_id', 'service_sms_senders', ['service_id'], unique=True)
# ### end Alembic commands ###
|
Remove unique constraint for ServiceSmsSenders.
This will allow a service to have multiple sms senders."""
Revision ID: 0125_remove_unique_constraint
Revises: 0124_add_free_sms_fragment_limit
Create Date: 2017-10-17 16:47:37.826333
"""
from alembic import op
import sqlalchemy as sa
revision = '0125_remove_unique_constraint'
down_revision = '0124_add_free_sms_fragment_limit'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_service_sms_senders_service_id', table_name='service_sms_senders')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_service_sms_senders_service_id'), table_name='service_sms_senders')
op.create_index('ix_service_sms_senders_service_id', 'service_sms_senders', ['service_id'], unique=True)
# ### end Alembic commands ###
|
<commit_before><commit_msg>Remove unique constraint for ServiceSmsSenders.
This will allow a service to have multiple sms senders.<commit_after>"""
Revision ID: 0125_remove_unique_constraint
Revises: 0124_add_free_sms_fragment_limit
Create Date: 2017-10-17 16:47:37.826333
"""
from alembic import op
import sqlalchemy as sa
revision = '0125_remove_unique_constraint'
down_revision = '0124_add_free_sms_fragment_limit'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_service_sms_senders_service_id', table_name='service_sms_senders')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_service_sms_senders_service_id'), table_name='service_sms_senders')
op.create_index('ix_service_sms_senders_service_id', 'service_sms_senders', ['service_id'], unique=True)
# ### end Alembic commands ###
|
|
24c757ee86ce121e5cfa27a66ee7d13d167c7f9d
|
tools/subset_symbols.py
|
tools/subset_symbols.py
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a curated subset of NotoSansSymbols."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
import subset
def main(argv):
"""Subset the Noto Symbols font which is given as the argument."""
source_file_name = argv[1]
target_coverage = {
0x20BA, # TURKISH LIRA SIGN
0x20BC, # MANAT SIGN
0x20BD, # RUBLE SIGN
0x22EE, # VERTICAL ELLIPSIS
0x25AB, # WHITE SMALL SQUARE
0x25FB, # WHITE MEDIUM SQUARE
0x25FC, # BLACK MEDIUM SQUARE
0x25FD, # WHITE MEDIUM SMALL SQUARE
0x25FE, # BLACK MEDIUM SMALL SQUARE
0x2600, # BLACK SUN WITH RAYS
0x266B, # BEAMED EIGHTH NOTES
0x26AA, # MEDIUM WHITE CIRCLE
0x26AB, # MEDIUM BLACK CIRCLE
0x2757, # HEAVY EXCLAMATION MARK SYMBOL
0x2934, # ARROW POINTING RIGHTWARDS THEN CURVING UPWARDS
0x2935, # ARROW POINTING RIGHTWARDS THEN CURVING DOWNWARDS
0x2B05, # LEFTWARDS BLACK ARROW
0x2B06, # UPWARDS BLACK ARROW
0x2B07, # DOWNWARDS BLACK ARROW
0x2B1B, # BLACK LARGE SQUARE
0x2B1C, # WHITE LARGE SQUARE
0x2B50, # WHITE MEDIUM STAR
0x2B55, # HEAVY LARGE CIRCLE
}
target_coverage.update(range(0x2800, 0x28FF+1)) # Braille symbols
subset.subset_font(
source_file_name,
'NotoSansSymbols-Regular-Subsetted.ttf',
include=target_coverage)
if __name__ == '__main__':
main(sys.argv)
|
Add tool for subsetting symbols.
|
Add tool for subsetting symbols.
git-svn-id: ed1ad396822b1b6daf934b380f36d983ac38e44f@276 37e447bf-a746-2f79-0798-35135ca55e94
|
Python
|
apache-2.0
|
yannisl/noto-monolithic,yannisl/noto-monolithic,yannisl/noto-monolithic,wskplho/noto-monolithic,yannisl/noto-monolithic,wskplho/noto-monolithic,wskplho/noto-monolithic,wskplho/noto-monolithic
|
Add tool for subsetting symbols.
git-svn-id: ed1ad396822b1b6daf934b380f36d983ac38e44f@276 37e447bf-a746-2f79-0798-35135ca55e94
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a curated subset of NotoSansSymbols."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
import subset
def main(argv):
"""Subset the Noto Symbols font which is given as the argument."""
source_file_name = argv[1]
target_coverage = {
0x20BA, # TURKISH LIRA SIGN
0x20BC, # MANAT SIGN
0x20BD, # RUBLE SIGN
0x22EE, # VERTICAL ELLIPSIS
0x25AB, # WHITE SMALL SQUARE
0x25FB, # WHITE MEDIUM SQUARE
0x25FC, # BLACK MEDIUM SQUARE
0x25FD, # WHITE MEDIUM SMALL SQUARE
0x25FE, # BLACK MEDIUM SMALL SQUARE
0x2600, # BLACK SUN WITH RAYS
0x266B, # BEAMED EIGHTH NOTES
0x26AA, # MEDIUM WHITE CIRCLE
0x26AB, # MEDIUM BLACK CIRCLE
0x2757, # HEAVY EXCLAMATION MARK SYMBOL
0x2934, # ARROW POINTING RIGHTWARDS THEN CURVING UPWARDS
0x2935, # ARROW POINTING RIGHTWARDS THEN CURVING DOWNWARDS
0x2B05, # LEFTWARDS BLACK ARROW
0x2B06, # UPWARDS BLACK ARROW
0x2B07, # DOWNWARDS BLACK ARROW
0x2B1B, # BLACK LARGE SQUARE
0x2B1C, # WHITE LARGE SQUARE
0x2B50, # WHITE MEDIUM STAR
0x2B55, # HEAVY LARGE CIRCLE
}
target_coverage.update(range(0x2800, 0x28FF+1)) # Braille symbols
subset.subset_font(
source_file_name,
'NotoSansSymbols-Regular-Subsetted.ttf',
include=target_coverage)
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add tool for subsetting symbols.
git-svn-id: ed1ad396822b1b6daf934b380f36d983ac38e44f@276 37e447bf-a746-2f79-0798-35135ca55e94<commit_after>
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a curated subset of NotoSansSymbols."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
import subset
def main(argv):
"""Subset the Noto Symbols font which is given as the argument."""
source_file_name = argv[1]
target_coverage = {
0x20BA, # TURKISH LIRA SIGN
0x20BC, # MANAT SIGN
0x20BD, # RUBLE SIGN
0x22EE, # VERTICAL ELLIPSIS
0x25AB, # WHITE SMALL SQUARE
0x25FB, # WHITE MEDIUM SQUARE
0x25FC, # BLACK MEDIUM SQUARE
0x25FD, # WHITE MEDIUM SMALL SQUARE
0x25FE, # BLACK MEDIUM SMALL SQUARE
0x2600, # BLACK SUN WITH RAYS
0x266B, # BEAMED EIGHTH NOTES
0x26AA, # MEDIUM WHITE CIRCLE
0x26AB, # MEDIUM BLACK CIRCLE
0x2757, # HEAVY EXCLAMATION MARK SYMBOL
0x2934, # ARROW POINTING RIGHTWARDS THEN CURVING UPWARDS
0x2935, # ARROW POINTING RIGHTWARDS THEN CURVING DOWNWARDS
0x2B05, # LEFTWARDS BLACK ARROW
0x2B06, # UPWARDS BLACK ARROW
0x2B07, # DOWNWARDS BLACK ARROW
0x2B1B, # BLACK LARGE SQUARE
0x2B1C, # WHITE LARGE SQUARE
0x2B50, # WHITE MEDIUM STAR
0x2B55, # HEAVY LARGE CIRCLE
}
target_coverage.update(range(0x2800, 0x28FF+1)) # Braille symbols
subset.subset_font(
source_file_name,
'NotoSansSymbols-Regular-Subsetted.ttf',
include=target_coverage)
if __name__ == '__main__':
main(sys.argv)
|
Add tool for subsetting symbols.
git-svn-id: ed1ad396822b1b6daf934b380f36d983ac38e44f@276 37e447bf-a746-2f79-0798-35135ca55e94#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a curated subset of NotoSansSymbols."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
import subset
def main(argv):
"""Subset the Noto Symbols font which is given as the argument."""
source_file_name = argv[1]
target_coverage = {
0x20BA, # TURKISH LIRA SIGN
0x20BC, # MANAT SIGN
0x20BD, # RUBLE SIGN
0x22EE, # VERTICAL ELLIPSIS
0x25AB, # WHITE SMALL SQUARE
0x25FB, # WHITE MEDIUM SQUARE
0x25FC, # BLACK MEDIUM SQUARE
0x25FD, # WHITE MEDIUM SMALL SQUARE
0x25FE, # BLACK MEDIUM SMALL SQUARE
0x2600, # BLACK SUN WITH RAYS
0x266B, # BEAMED EIGHTH NOTES
0x26AA, # MEDIUM WHITE CIRCLE
0x26AB, # MEDIUM BLACK CIRCLE
0x2757, # HEAVY EXCLAMATION MARK SYMBOL
0x2934, # ARROW POINTING RIGHTWARDS THEN CURVING UPWARDS
0x2935, # ARROW POINTING RIGHTWARDS THEN CURVING DOWNWARDS
0x2B05, # LEFTWARDS BLACK ARROW
0x2B06, # UPWARDS BLACK ARROW
0x2B07, # DOWNWARDS BLACK ARROW
0x2B1B, # BLACK LARGE SQUARE
0x2B1C, # WHITE LARGE SQUARE
0x2B50, # WHITE MEDIUM STAR
0x2B55, # HEAVY LARGE CIRCLE
}
target_coverage.update(range(0x2800, 0x28FF+1)) # Braille symbols
subset.subset_font(
source_file_name,
'NotoSansSymbols-Regular-Subsetted.ttf',
include=target_coverage)
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add tool for subsetting symbols.
git-svn-id: ed1ad396822b1b6daf934b380f36d983ac38e44f@276 37e447bf-a746-2f79-0798-35135ca55e94<commit_after>#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create a curated subset of NotoSansSymbols."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
import subset
def main(argv):
"""Subset the Noto Symbols font which is given as the argument."""
source_file_name = argv[1]
target_coverage = {
0x20BA, # TURKISH LIRA SIGN
0x20BC, # MANAT SIGN
0x20BD, # RUBLE SIGN
0x22EE, # VERTICAL ELLIPSIS
0x25AB, # WHITE SMALL SQUARE
0x25FB, # WHITE MEDIUM SQUARE
0x25FC, # BLACK MEDIUM SQUARE
0x25FD, # WHITE MEDIUM SMALL SQUARE
0x25FE, # BLACK MEDIUM SMALL SQUARE
0x2600, # BLACK SUN WITH RAYS
0x266B, # BEAMED EIGHTH NOTES
0x26AA, # MEDIUM WHITE CIRCLE
0x26AB, # MEDIUM BLACK CIRCLE
0x2757, # HEAVY EXCLAMATION MARK SYMBOL
0x2934, # ARROW POINTING RIGHTWARDS THEN CURVING UPWARDS
0x2935, # ARROW POINTING RIGHTWARDS THEN CURVING DOWNWARDS
0x2B05, # LEFTWARDS BLACK ARROW
0x2B06, # UPWARDS BLACK ARROW
0x2B07, # DOWNWARDS BLACK ARROW
0x2B1B, # BLACK LARGE SQUARE
0x2B1C, # WHITE LARGE SQUARE
0x2B50, # WHITE MEDIUM STAR
0x2B55, # HEAVY LARGE CIRCLE
}
target_coverage.update(range(0x2800, 0x28FF+1)) # Braille symbols
subset.subset_font(
source_file_name,
'NotoSansSymbols-Regular-Subsetted.ttf',
include=target_coverage)
if __name__ == '__main__':
main(sys.argv)
|
|
537d2787b92b5ab3246c853a33030378c7fe15f0
|
tests/test_load_tool.py
|
tests/test_load_tool.py
|
from cwltool.load_tool import load_tool
from cwltool.context import LoadingContext, RuntimeContext
from cwltool.errors import WorkflowException
import pytest
from .util import (get_data, get_main_output,
get_windows_safe_factory,
needs_docker, working_directory,
needs_singularity, temp_dir,
windows_needs_docker)
@windows_needs_docker
def test_check_version():
"""Test that it is permitted to load without updating, but not
execute. Attempting to execute without updating to the internal
version should raise an error.
"""
joborder = {"inp": "abc"}
loadingContext = LoadingContext({"do_update": True})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
for j in tool.job(joborder, None, RuntimeContext()):
pass
loadingContext = LoadingContext({"do_update": False})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
with pytest.raises(WorkflowException):
for j in tool.job(joborder, None, RuntimeContext()):
pass
def test_use_metadata():
"""Test that it will use the version from loadingContext.metadata if
cwlVersion isn't present in the document.
"""
loadingContext = LoadingContext({"do_update": False})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
loadingContext = LoadingContext()
loadingContext.metadata = tool.metadata
tooldata = tool.tool.copy()
del tooldata["cwlVersion"]
tool2 = load_tool(tooldata, loadingContext)
|
Add tests for version check and metadata behavior
|
Add tests for version check and metadata behavior
|
Python
|
apache-2.0
|
common-workflow-language/cwltool,common-workflow-language/cwltool,common-workflow-language/cwltool
|
Add tests for version check and metadata behavior
|
from cwltool.load_tool import load_tool
from cwltool.context import LoadingContext, RuntimeContext
from cwltool.errors import WorkflowException
import pytest
from .util import (get_data, get_main_output,
get_windows_safe_factory,
needs_docker, working_directory,
needs_singularity, temp_dir,
windows_needs_docker)
@windows_needs_docker
def test_check_version():
"""Test that it is permitted to load without updating, but not
execute. Attempting to execute without updating to the internal
version should raise an error.
"""
joborder = {"inp": "abc"}
loadingContext = LoadingContext({"do_update": True})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
for j in tool.job(joborder, None, RuntimeContext()):
pass
loadingContext = LoadingContext({"do_update": False})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
with pytest.raises(WorkflowException):
for j in tool.job(joborder, None, RuntimeContext()):
pass
def test_use_metadata():
"""Test that it will use the version from loadingContext.metadata if
cwlVersion isn't present in the document.
"""
loadingContext = LoadingContext({"do_update": False})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
loadingContext = LoadingContext()
loadingContext.metadata = tool.metadata
tooldata = tool.tool.copy()
del tooldata["cwlVersion"]
tool2 = load_tool(tooldata, loadingContext)
|
<commit_before><commit_msg>Add tests for version check and metadata behavior<commit_after>
|
from cwltool.load_tool import load_tool
from cwltool.context import LoadingContext, RuntimeContext
from cwltool.errors import WorkflowException
import pytest
from .util import (get_data, get_main_output,
get_windows_safe_factory,
needs_docker, working_directory,
needs_singularity, temp_dir,
windows_needs_docker)
@windows_needs_docker
def test_check_version():
"""Test that it is permitted to load without updating, but not
execute. Attempting to execute without updating to the internal
version should raise an error.
"""
joborder = {"inp": "abc"}
loadingContext = LoadingContext({"do_update": True})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
for j in tool.job(joborder, None, RuntimeContext()):
pass
loadingContext = LoadingContext({"do_update": False})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
with pytest.raises(WorkflowException):
for j in tool.job(joborder, None, RuntimeContext()):
pass
def test_use_metadata():
"""Test that it will use the version from loadingContext.metadata if
cwlVersion isn't present in the document.
"""
loadingContext = LoadingContext({"do_update": False})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
loadingContext = LoadingContext()
loadingContext.metadata = tool.metadata
tooldata = tool.tool.copy()
del tooldata["cwlVersion"]
tool2 = load_tool(tooldata, loadingContext)
|
Add tests for version check and metadata behaviorfrom cwltool.load_tool import load_tool
from cwltool.context import LoadingContext, RuntimeContext
from cwltool.errors import WorkflowException
import pytest
from .util import (get_data, get_main_output,
get_windows_safe_factory,
needs_docker, working_directory,
needs_singularity, temp_dir,
windows_needs_docker)
@windows_needs_docker
def test_check_version():
"""Test that it is permitted to load without updating, but not
execute. Attempting to execute without updating to the internal
version should raise an error.
"""
joborder = {"inp": "abc"}
loadingContext = LoadingContext({"do_update": True})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
for j in tool.job(joborder, None, RuntimeContext()):
pass
loadingContext = LoadingContext({"do_update": False})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
with pytest.raises(WorkflowException):
for j in tool.job(joborder, None, RuntimeContext()):
pass
def test_use_metadata():
"""Test that it will use the version from loadingContext.metadata if
cwlVersion isn't present in the document.
"""
loadingContext = LoadingContext({"do_update": False})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
loadingContext = LoadingContext()
loadingContext.metadata = tool.metadata
tooldata = tool.tool.copy()
del tooldata["cwlVersion"]
tool2 = load_tool(tooldata, loadingContext)
|
<commit_before><commit_msg>Add tests for version check and metadata behavior<commit_after>from cwltool.load_tool import load_tool
from cwltool.context import LoadingContext, RuntimeContext
from cwltool.errors import WorkflowException
import pytest
from .util import (get_data, get_main_output,
get_windows_safe_factory,
needs_docker, working_directory,
needs_singularity, temp_dir,
windows_needs_docker)
@windows_needs_docker
def test_check_version():
"""Test that it is permitted to load without updating, but not
execute. Attempting to execute without updating to the internal
version should raise an error.
"""
joborder = {"inp": "abc"}
loadingContext = LoadingContext({"do_update": True})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
for j in tool.job(joborder, None, RuntimeContext()):
pass
loadingContext = LoadingContext({"do_update": False})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
with pytest.raises(WorkflowException):
for j in tool.job(joborder, None, RuntimeContext()):
pass
def test_use_metadata():
"""Test that it will use the version from loadingContext.metadata if
cwlVersion isn't present in the document.
"""
loadingContext = LoadingContext({"do_update": False})
tool = load_tool(get_data("tests/echo.cwl"), loadingContext)
loadingContext = LoadingContext()
loadingContext.metadata = tool.metadata
tooldata = tool.tool.copy()
del tooldata["cwlVersion"]
tool2 = load_tool(tooldata, loadingContext)
|
|
b5ae3a97fde4421307e6cd86dba188cbf7999435
|
chrome/browser/extensions/PRESUBMIT.py
|
chrome/browser/extensions/PRESUBMIT.py
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/extensions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
Add aura compile testing by default to likely areas (4).
|
Add aura compile testing by default to likely areas (4).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907044
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114847 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
Fireblend/chromium-crosswalk,jaruba/chromium.src,dednal/chromium.src,jaruba/chromium.src,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,zcbenz/cefode-chromium,M4sse/chromium.src,chuan9/chromium-crosswalk,Just-D/chromium-1,markYoungH/chromium.src,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,jaruba/chromium.src,M4sse/chromium.src,mogoweb/chromium-crosswalk,jaruba/chromium.src,robclark/chromium,krieger-od/nwjs_chromium.src,patrickm/chromium.src,junmin-zhu/chromium-rivertrail,littlstar/chromium.src,nacl-webkit/chrome_deps,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,Chilledheart/chromium,krieger-od/nwjs_chromium.src,Just-D/chromium-1,markYoungH/chromium.src,patrickm/chromium.src,mogoweb/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,nacl-webkit/chrome_deps,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,keishi/chromium,anirudhSK/chromium,robclark/chromium,TheTypoMaster/chromium-crosswalk,ltilve/chromium,hujiajie/pa-chromium,ltilve/chromium,robclark/chromium,rogerwang/chromium,ondra-novak/chromium.src,Jonekee/chromium.src,junmin-zhu/chromium-rivertrail,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,pozdnyakov/chromium-crosswalk,krieger-od/nwjs_chromium.src,keishi/chromium,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,rogerwang/chromium,timopulkkinen/BubbleFish,littlstar/chromium.src,chuan9/chromium-crosswalk,Chilledheart/chromium,rogerwang/chromium,hgl888/chromium-crosswalk,dushu1203/chromium.src,zcbenz/cefode-chromium,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,littlstar/chromium.src,junmin-zhu/chromium-rivertrail,M4sse/chromium.src,axinging/chromium-crosswalk,anirudhSK/chromium,patrickm/chromium.src,pozdnyakov/chromium-crosswalk,Chilledheart/chromium,Jonekee/chromium.src,mogoweb/chromium-crosswalk,junmin-zhu/chromium-rivertrail,chuan9/chromium-crosswalk,timopulkkinen/BubbleFish,markYoungH/chromium.src,ondra-novak/chromium.src,zcbenz/cefode-chromium,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,ChromiumWebApps/chromium,littlstar/chromium.src,Just-D/chromium-1,dednal/chromium.src,hgl888/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,keishi/chromium,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,pozdnyakov/chromium-crosswalk,anirudhSK/chromium,jaruba/chromium.src,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,patrickm/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,keishi/chromium,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,junmin-zhu/chromium-rivertrail,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,anirudhSK/chromium,Fireblend/chromium-crosswalk,timopulkkinen/BubbleFish,zcbenz/cefode-chromium,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,littlstar/chromium.src,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,junmin-zhu/chromium-rivertrail,Just-D/chromium-1,hujiajie/pa-chromium,bright-sparks/chromium-spacewalk,robclark/chromium,patrickm/chromium.src,PeterWangIntel/
chromium-crosswalk,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,patrickm/chromium.src,markYoungH/chromium.src,pozdnyakov/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,junmin-zhu/chromium-rivertrail,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,ChromiumWebApps/chromium,patrickm/chromium.src,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,nacl-webkit/chrome_deps,Pluto-tv/chromium-crosswalk,pozdnyakov/chromium-crosswalk,chuan9/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,hujiajie/pa-chromium,Chilledheart/chromium,keishi/chromium,krieger-od/nwjs_chromium.src,Chilledheart/chromium,timopulkkinen/BubbleFish,bright-sparks/chromium-spacewalk,timopulkkinen/BubbleFish,hujiajie/pa-chromium,chuan9/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,pozdnyakov/chromium-crosswalk,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,Just-D/chromium-1,M4sse/chromium.src,pozdnyakov/chromium-crosswalk,axinging/chromium-crosswalk,timopulkkinen/BubbleFish,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,ondra-novak/chromium.src,nacl-webkit/chrome_deps,patrickm/chromium.src,axinging/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,robclark/chromium,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,timopulkkinen/BubbleFish,hujiajie/pa-chromium,ChromiumWebApps/chromium,Chilledheart/chromium,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,rogerwang/chromium,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,PeterWangIntel/chromium-crosswalk,nacl-webkit/chrome_deps,rogerwang/chromium,bright-sparks/chromium-spacewalk,keishi/chromium,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,rogerwang/chromium,keishi/chromium,M4sse/chromium.src,hujiajie/pa-chromium,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,bright-sparks/chromium-spacewalk,bright-sparks/chromium-spacewalk,ltilve/chromium,dednal/chromium.src,ChromiumWebApps/chromium,dednal/chromium.src,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,keishi/chromium,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,anirudhSK/chromium,nacl-webkit/chrome_deps,TheTypoMaster/chromium-crosswalk,zcbenz/cefode-chromium,robclark/chromium,robclark/chromium,zcbenz/cefode-chromium,anirudhSK/chromium,M4sse/chromium.src,junmin-zhu/chromium-rivertrail,zcbenz/cefode-chromium,timopulkkinen/BubbleFish,anirudhSK/chromium,Just-D/chromium-1,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ltilve/chromium,timopulkkinen/BubbleFish,nacl-webkit/chrome_deps,anirudhSK/chromium,zcbenz/cefode-chromium,Pluto-tv/chromium-crosswalk,junmin-zhu/chromium-rivertrail,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,rogerwang/chromium,robclark/chromium,Just-D/chromium-1,keishi/c
hromium,ChromiumWebApps/chromium,timopulkkinen/BubbleFish,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,mogoweb/chromium-crosswalk,anirudhSK/chromium,ondra-novak/chromium.src,jaruba/chromium.src,chuan9/chromium-crosswalk,dushu1203/chromium.src,jaruba/chromium.src,zcbenz/cefode-chromium,pozdnyakov/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,rogerwang/chromium,anirudhSK/chromium,ltilve/chromium,pozdnyakov/chromium-crosswalk,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,keishi/chromium,Fireblend/chromium-crosswalk,Jonekee/chromium.src,littlstar/chromium.src,mogoweb/chromium-crosswalk,hujiajie/pa-chromium,hujiajie/pa-chromium,mogoweb/chromium-crosswalk,ltilve/chromium,hgl888/chromium-crosswalk,pozdnyakov/chromium-crosswalk,zcbenz/cefode-chromium,dushu1203/chromium.src,ltilve/chromium,M4sse/chromium.src,zcbenz/cefode-chromium,chuan9/chromium-crosswalk,ChromiumWebApps/chromium,markYoungH/chromium.src,jaruba/chromium.src,mogoweb/chromium-crosswalk,keishi/chromium,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,robclark/chromium,robclark/chromium,crosswalk-project/chromium-crosswalk-efl,Chilledheart/chromium,ltilve/chromium,axinging/chromium-crosswalk,dednal/chromium.src,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,nacl-webkit/chrome_deps,Just-D/chromium-1,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,rogerwang/chromium,markYoungH/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,nacl-webkit/chrome_deps,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,hujiajie/pa-chromium,M4sse/chromium.src,rogerwang/chromium,timopulkkinen/BubbleFish,hujiajie/pa-chromium,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,markYoungH/chromium.src,nacl-webkit/chrome_deps,ChromiumWebApps/chromium,dushu1203/chromium.src,anirudhSK/chromium,junmin-zhu/chromium-rivertrail,dednal/chromium.src,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,dednal/chromium.src,junmin-zhu/chromium-rivertrail,hujiajie/pa-chromium,bright-sparks/chromium-spacewalk,mogoweb/chromium-crosswalk
|
Add aura compile testing by default to likely areas (4).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907044
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114847 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/extensions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
<commit_before><commit_msg>Add aura compile testing by default to likely areas (4).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907044
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114847 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/extensions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
Add aura compile testing by default to likely areas (4).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907044
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114847 0039d316-1c4b-4281-b951-d872f2087c98# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/extensions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
<commit_before><commit_msg>Add aura compile testing by default to likely areas (4).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907044
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114847 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/extensions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
|
cb4167278d3d342cbbb3ea185d0bbff7ff72ff4d
|
python/example_code/s3/s3-python-example-upload-file.py
|
python/example_code/s3/s3-python-example-upload-file.py
|
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an S3 client
s3 = boto3.client('s3')
filename = 'file.txt'
bucket_name = 'my-bucket'
# Uploads the given file using a managed uploader, which will split up large
# files automatically and upload parts in parallel.
s3.upload_file(filename, bucket_name, filename)
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[s3-python-example-upload-file.py demonstrates how to add a file (or object) to an Amazon S3 bucket.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon S3]
# snippet-service:[s3]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-06-25]
# snippet-sourceauthor:[jschwarzwalder (AWS)]
|
Revert "Delete duplicate Python example"
|
Revert "Delete duplicate Python example"
|
Python
|
apache-2.0
|
awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples
|
Revert "Delete duplicate Python example"
|
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an S3 client
s3 = boto3.client('s3')
filename = 'file.txt'
bucket_name = 'my-bucket'
# Uploads the given file using a managed uploader, which will split up large
# files automatically and upload parts in parallel.
s3.upload_file(filename, bucket_name, filename)
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[s3-python-example-upload-file.py demonstrates how to add a file (or object) to an Amazon S3 bucket.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon S3]
# snippet-service:[s3]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-06-25]
# snippet-sourceauthor:[jschwarzwalder (AWS)]
|
<commit_before><commit_msg>Revert "Delete duplicate Python example"<commit_after>
|
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an S3 client
s3 = boto3.client('s3')
filename = 'file.txt'
bucket_name = 'my-bucket'
# Uploads the given file using a managed uploader, which will split up large
# files automatically and upload parts in parallel.
s3.upload_file(filename, bucket_name, filename)
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[s3-python-example-upload-file.py demonstrates how to add a file (or object) to an Amazon S3 bucket.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon S3]
# snippet-service:[s3]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-06-25]
# snippet-sourceauthor:[jschwarzwalder (AWS)]
|
Revert "Delete duplicate Python example"# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an S3 client
s3 = boto3.client('s3')
filename = 'file.txt'
bucket_name = 'my-bucket'
# Uploads the given file using a managed uploader, which will split up large
# files automatically and upload parts in parallel.
s3.upload_file(filename, bucket_name, filename)
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[s3-python-example-upload-file.py demonstrates how to add a file (or object) to an Amazon S3 bucket.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon S3]
# snippet-service:[s3]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-06-25]
# snippet-sourceauthor:[jschwarzwalder (AWS)]
|
<commit_before><commit_msg>Revert "Delete duplicate Python example"<commit_after># Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an S3 client
s3 = boto3.client('s3')
filename = 'file.txt'
bucket_name = 'my-bucket'
# Uploads the given file using a managed uploader, which will split up large
# files automatically and upload parts in parallel.
s3.upload_file(filename, bucket_name, filename)
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[s3-python-example-upload-file.py demonstrates how to add a file (or object) to an Amazon S3 bucket.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon S3]
# snippet-service:[s3]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-06-25]
# snippet-sourceauthor:[jschwarzwalder (AWS)]
|
|
1fead9006e75aa9ea3933f5f24ff7c8042225a8a
|
version.py
|
version.py
|
ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '5.7'
|
ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '5.8'
|
Bump PROVISION_VERSION for recent package upgrades.
|
Bump PROVISION_VERSION for recent package upgrades.
|
Python
|
apache-2.0
|
kou/zulip,dhcrzf/zulip,jrowan/zulip,tommyip/zulip,brockwhittaker/zulip,rht/zulip,jackrzhang/zulip,Galexrt/zulip,brainwane/zulip,brainwane/zulip,zulip/zulip,kou/zulip,zulip/zulip,rishig/zulip,andersk/zulip,andersk/zulip,dhcrzf/zulip,vabs22/zulip,rishig/zulip,brockwhittaker/zulip,eeshangarg/zulip,timabbott/zulip,amanharitsh123/zulip,showell/zulip,verma-varsha/zulip,tommyip/zulip,showell/zulip,mahim97/zulip,Galexrt/zulip,vabs22/zulip,timabbott/zulip,kou/zulip,brainwane/zulip,vabs22/zulip,eeshangarg/zulip,shubhamdhama/zulip,synicalsyntax/zulip,jrowan/zulip,jrowan/zulip,synicalsyntax/zulip,jackrzhang/zulip,vaidap/zulip,zulip/zulip,tommyip/zulip,Galexrt/zulip,punchagan/zulip,shubhamdhama/zulip,vaidap/zulip,rishig/zulip,jrowan/zulip,jackrzhang/zulip,tommyip/zulip,rht/zulip,punchagan/zulip,verma-varsha/zulip,shubhamdhama/zulip,mahim97/zulip,rht/zulip,punchagan/zulip,tommyip/zulip,mahim97/zulip,brockwhittaker/zulip,hackerkid/zulip,eeshangarg/zulip,zulip/zulip,punchagan/zulip,Galexrt/zulip,jackrzhang/zulip,zulip/zulip,brainwane/zulip,rishig/zulip,timabbott/zulip,jackrzhang/zulip,hackerkid/zulip,zulip/zulip,hackerkid/zulip,synicalsyntax/zulip,amanharitsh123/zulip,amanharitsh123/zulip,showell/zulip,timabbott/zulip,eeshangarg/zulip,zulip/zulip,brainwane/zulip,andersk/zulip,kou/zulip,rishig/zulip,andersk/zulip,brockwhittaker/zulip,vaidap/zulip,showell/zulip,eeshangarg/zulip,hackerkid/zulip,brockwhittaker/zulip,amanharitsh123/zulip,timabbott/zulip,brockwhittaker/zulip,vabs22/zulip,hackerkid/zulip,rishig/zulip,kou/zulip,rht/zulip,tommyip/zulip,shubhamdhama/zulip,punchagan/zulip,jrowan/zulip,rht/zulip,andersk/zulip,jackrzhang/zulip,synicalsyntax/zulip,vaidap/zulip,jackrzhang/zulip,vabs22/zulip,dhcrzf/zulip,synicalsyntax/zulip,timabbott/zulip,showell/zulip,verma-varsha/zulip,shubhamdhama/zulip,verma-varsha/zulip,kou/zulip,amanharitsh123/zulip,rishig/zulip,brainwane/zulip,dhcrzf/zulip,dhcrzf/zulip,showell/zulip,rht/zulip,hackerkid/zulip,vaidap/zulip,Galexrt/zulip,tommyip/zulip,Galexrt/zulip,synicalsyntax/zulip,eeshangarg/zulip,Galexrt/zulip,amanharitsh123/zulip,eeshangarg/zulip,dhcrzf/zulip,dhcrzf/zulip,verma-varsha/zulip,vabs22/zulip,synicalsyntax/zulip,shubhamdhama/zulip,brainwane/zulip,andersk/zulip,andersk/zulip,verma-varsha/zulip,mahim97/zulip,showell/zulip,punchagan/zulip,mahim97/zulip,shubhamdhama/zulip,mahim97/zulip,hackerkid/zulip,vaidap/zulip,punchagan/zulip,rht/zulip,timabbott/zulip,jrowan/zulip,kou/zulip
|
ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '5.7'
Bump PROVISION_VERSION for recent package upgrades.
|
ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '5.8'
|
<commit_before>ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '5.7'
<commit_msg>Bump PROVISION_VERSION for recent package upgrades.<commit_after>
|
ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '5.8'
|
ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '5.7'
Bump PROVISION_VERSION for recent package upgrades.ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '5.8'
|
<commit_before>ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '5.7'
<commit_msg>Bump PROVISION_VERSION for recent package upgrades.<commit_after>ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '5.8'
|
bac6fb1d5287452bdda0dc9d6b730ebd092ae3ab
|
codingame/power_of_thor.py
|
codingame/power_of_thor.py
|
# LX: the X position of the light of power
# LY: the Y position of the light of power
# x: Thor's starting X position
# y: Thor's starting Y position
LX, LY, x, y = [int(i) for i in raw_input().split()]
# Game loop
while True:
E = int(raw_input()) # The level of Thor's remaining energy, representing the number of moves he can still make.
result = ""
if (y < LY):
result = "S"
y += 1
elif (LY < y):
result = "N"
y -= 1
if (x < LX):
result += "E"
x += 1
elif (LX < x):
result += "W"
x -= 1
print result
|
Add CodinGame Power of Thor
|
Add CodinGame Power of Thor
|
Python
|
mit
|
AntoineAugusti/katas,AntoineAugusti/katas,AntoineAugusti/katas
|
Add CodinGame Power of Thor
|
# LX: the X position of the light of power
# LY: the Y position of the light of power
# x: Thor's starting X position
# y: Thor's starting Y position
LX, LY, x, y = [int(i) for i in raw_input().split()]
# Game loop
while True:
E = int(raw_input()) # The level of Thor's remaining energy, representing the number of moves he can still make.
result = ""
if (y < LY):
result = "S"
y += 1
elif (LY < y):
result = "N"
y -= 1
if (x < LX):
result += "E"
x += 1
elif (LX < x):
result += "W"
x -= 1
print result
|
<commit_before><commit_msg>Add CodinGame Power of Thor<commit_after>
|
# LX: the X position of the light of power
# LY: the Y position of the light of power
# x: Thor's starting X position
# y: Thor's starting Y position
LX, LY, x, y = [int(i) for i in raw_input().split()]
# Game loop
while True:
E = int(raw_input()) # The level of Thor's remaining energy, representing the number of moves he can still make.
result = ""
if (y < LY):
result = "S"
y += 1
elif (LY < y):
result = "N"
y -= 1
if (x < LX):
result += "E"
x += 1
elif (LX < x):
result += "W"
x -= 1
print result
|
Add CodinGame Power of Thor# LX: the X position of the light of power
# LY: the Y position of the light of power
# x: Thor's starting X position
# y: Thor's starting Y position
LX, LY, x, y = [int(i) for i in raw_input().split()]
# Game loop
while True:
E = int(raw_input()) # The level of Thor's remaining energy, representing the number of moves he can still make.
result = ""
if (y < LY):
result = "S"
y += 1
elif (LY < y):
result = "N"
y -= 1
if (x < LX):
result += "E"
x += 1
elif (LX < x):
result += "W"
x -= 1
print result
|
<commit_before><commit_msg>Add CodinGame Power of Thor<commit_after># LX: the X position of the light of power
# LY: the Y position of the light of power
# x: Thor's starting X position
# y: Thor's starting Y position
LX, LY, x, y = [int(i) for i in raw_input().split()]
# Game loop
while True:
E = int(raw_input()) # The level of Thor's remaining energy, representing the number of moves he can still make.
result = ""
if (y < LY):
result = "S"
y += 1
elif (LY < y):
result = "N"
y -= 1
if (x < LX):
result += "E"
x += 1
elif (LX < x):
result += "W"
x -= 1
print result
|
|
d47abe23bf1c88520ee6ecf954dadaae42142366
|
src/engine/SCons/Tool/javacTests.py
|
src/engine/SCons/Tool/javacTests.py
|
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import unittest
import SCons.Tool.javac
class pathoptTestCase(unittest.TestCase):
def assert_pathopt(self, expect, path):
popt = SCons.Tool.javac.pathopt('-foopath', 'FOOPATH')
env = {'FOOPATH': path}
actual = popt(None, None, env, None)
self.assertEquals(expect, actual)
def test_unset(self):
self.assert_pathopt([], None)
self.assert_pathopt([], '')
def test_str(self):
self.assert_pathopt(['-foopath', '/foo/bar'],
'/foo/bar')
def test_list_str(self):
self.assert_pathopt(['-foopath', '/foo%s/bar' % os.pathsep],
['/foo', '/bar'])
def test_uses_pathsep(self):
save = os.pathsep
try:
os.pathsep = '!'
self.assert_pathopt(['-foopath', 'foo!bar'],
['foo', 'bar'])
finally:
os.pathsep = save
def test_default(self):
popt = SCons.Tool.javac.pathopt('-foopath', 'FOOPATH', default='DPATH')
env = {'FOOPATH': ['/foo', '/bar'],
'DPATH': '/baz'}
expect = ['-foopath', os.pathsep.join(['/foo', '/bar', '/baz'])]
actual = popt(None, None, env, None)
self.assertEquals(expect, actual)
if __name__ == "__main__":
unittest.main()
|
Add unit tests for SCons.Tool.javac (pathopt class only). This carefully avoids known bugs in order to have passing tests.
|
Add unit tests for SCons.Tool.javac (pathopt class only).
This carefully avoids known bugs in order to have passing tests.
|
Python
|
mit
|
Distrotech/scons,Distrotech/scons,Distrotech/scons,Distrotech/scons,Distrotech/scons
|
Add unit tests for SCons.Tool.javac (pathopt class only).
This carefully avoids known bugs in order to have passing tests.
|
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import unittest
import SCons.Tool.javac
class pathoptTestCase(unittest.TestCase):
def assert_pathopt(self, expect, path):
popt = SCons.Tool.javac.pathopt('-foopath', 'FOOPATH')
env = {'FOOPATH': path}
actual = popt(None, None, env, None)
self.assertEquals(expect, actual)
def test_unset(self):
self.assert_pathopt([], None)
self.assert_pathopt([], '')
def test_str(self):
self.assert_pathopt(['-foopath', '/foo/bar'],
'/foo/bar')
def test_list_str(self):
self.assert_pathopt(['-foopath', '/foo%s/bar' % os.pathsep],
['/foo', '/bar'])
def test_uses_pathsep(self):
save = os.pathsep
try:
os.pathsep = '!'
self.assert_pathopt(['-foopath', 'foo!bar'],
['foo', 'bar'])
finally:
os.pathsep = save
def test_default(self):
popt = SCons.Tool.javac.pathopt('-foopath', 'FOOPATH', default='DPATH')
env = {'FOOPATH': ['/foo', '/bar'],
'DPATH': '/baz'}
expect = ['-foopath', os.pathsep.join(['/foo', '/bar', '/baz'])]
actual = popt(None, None, env, None)
self.assertEquals(expect, actual)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit tests for SCons.Tool.javac (pathopt class only).
This carefully avoids known bugs in order to have passing tests.<commit_after>
|
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import unittest
import SCons.Tool.javac
class pathoptTestCase(unittest.TestCase):
def assert_pathopt(self, expect, path):
popt = SCons.Tool.javac.pathopt('-foopath', 'FOOPATH')
env = {'FOOPATH': path}
actual = popt(None, None, env, None)
self.assertEquals(expect, actual)
def test_unset(self):
self.assert_pathopt([], None)
self.assert_pathopt([], '')
def test_str(self):
self.assert_pathopt(['-foopath', '/foo/bar'],
'/foo/bar')
def test_list_str(self):
self.assert_pathopt(['-foopath', '/foo%s/bar' % os.pathsep],
['/foo', '/bar'])
def test_uses_pathsep(self):
save = os.pathsep
try:
os.pathsep = '!'
self.assert_pathopt(['-foopath', 'foo!bar'],
['foo', 'bar'])
finally:
os.pathsep = save
def test_default(self):
popt = SCons.Tool.javac.pathopt('-foopath', 'FOOPATH', default='DPATH')
env = {'FOOPATH': ['/foo', '/bar'],
'DPATH': '/baz'}
expect = ['-foopath', os.pathsep.join(['/foo', '/bar', '/baz'])]
actual = popt(None, None, env, None)
self.assertEquals(expect, actual)
if __name__ == "__main__":
unittest.main()
|
Add unit tests for SCons.Tool.javac (pathopt class only).
This carefully avoids known bugs in order to have passing tests.#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import unittest
import SCons.Tool.javac
class pathoptTestCase(unittest.TestCase):
def assert_pathopt(self, expect, path):
popt = SCons.Tool.javac.pathopt('-foopath', 'FOOPATH')
env = {'FOOPATH': path}
actual = popt(None, None, env, None)
self.assertEquals(expect, actual)
def test_unset(self):
self.assert_pathopt([], None)
self.assert_pathopt([], '')
def test_str(self):
self.assert_pathopt(['-foopath', '/foo/bar'],
'/foo/bar')
def test_list_str(self):
self.assert_pathopt(['-foopath', '/foo%s/bar' % os.pathsep],
['/foo', '/bar'])
def test_uses_pathsep(self):
save = os.pathsep
try:
os.pathsep = '!'
self.assert_pathopt(['-foopath', 'foo!bar'],
['foo', 'bar'])
finally:
os.pathsep = save
def test_default(self):
popt = SCons.Tool.javac.pathopt('-foopath', 'FOOPATH', default='DPATH')
env = {'FOOPATH': ['/foo', '/bar'],
'DPATH': '/baz'}
expect = ['-foopath', os.pathsep.join(['/foo', '/bar', '/baz'])]
actual = popt(None, None, env, None)
self.assertEquals(expect, actual)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit tests for SCons.Tool.javac (pathopt class only).
This carefully avoids known bugs in order to have passing tests.<commit_after>#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import unittest
import SCons.Tool.javac
class pathoptTestCase(unittest.TestCase):
def assert_pathopt(self, expect, path):
popt = SCons.Tool.javac.pathopt('-foopath', 'FOOPATH')
env = {'FOOPATH': path}
actual = popt(None, None, env, None)
self.assertEquals(expect, actual)
def test_unset(self):
self.assert_pathopt([], None)
self.assert_pathopt([], '')
def test_str(self):
self.assert_pathopt(['-foopath', '/foo/bar'],
'/foo/bar')
def test_list_str(self):
self.assert_pathopt(['-foopath', '/foo%s/bar' % os.pathsep],
['/foo', '/bar'])
def test_uses_pathsep(self):
save = os.pathsep
try:
os.pathsep = '!'
self.assert_pathopt(['-foopath', 'foo!bar'],
['foo', 'bar'])
finally:
os.pathsep = save
def test_default(self):
popt = SCons.Tool.javac.pathopt('-foopath', 'FOOPATH', default='DPATH')
env = {'FOOPATH': ['/foo', '/bar'],
'DPATH': '/baz'}
expect = ['-foopath', os.pathsep.join(['/foo', '/bar', '/baz'])]
actual = popt(None, None, env, None)
self.assertEquals(expect, actual)
if __name__ == "__main__":
unittest.main()
|
|
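Since the SCons commit above records only the tests, it can help to see what a generator satisfying them would roughly look like. The sketch below is a hypothetical reconstruction for illustration only — it is not the SCons source (which, per the commit message, has known bugs these tests deliberately avoid), and the real class may differ in signature and behaviour:

import os

class pathopt(object):
    # Hypothetical stand-in inferred from the test expectations above.
    def __init__(self, opt, var, default=None):
        self.opt = opt
        self.var = var
        self.default = default

    def __call__(self, target, source, env, for_signature):
        path = env.get(self.var)
        if path and not isinstance(path, list):
            path = [path]
        path = list(path) if path else []
        if self.default:
            default = env.get(self.default)
            if default:
                path.append(default)
        if path:
            # os.pathsep is read at call time, which is what test_uses_pathsep relies on.
            return [self.opt, os.pathsep.join(path)]
        return []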
0664983f6182fca7147d97fd2350bba3655da22a
|
CodeFights/weakNumbers.py
|
CodeFights/weakNumbers.py
|
#!/usr/local/bin/python
# Code Fights Weak Numbers Problem
def weakNumbers(n):
def get_divisors(n):
divs = []
for i in range(1, n + 1):
count = 0
for d in range(1, i + 1):
if i % d == 0:
count += 1
divs.append(count)
return divs
divs = get_divisors(n)
w = []
def main():
tests = [
[9, [2, 2]],
[1, [0, 1]],
[2, [0, 2]],
[7, [2, 1]],
[500, [403, 1]],
[4, [0, 4]]
]
for t in tests:
res = weakNumbers(t[0])
ans = t[1]
if ans == res:
print("PASSED: weakNumbers({}) returned {}"
.format(t[0], res))
else:
print("FAILED: weakNumbers({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Set up Code Fights weak numbers problem
|
Set up Code Fights weak numbers problem
|
Python
|
mit
|
HKuz/Test_Code
|
Set up Code Fights weak numbers problem
|
#!/usr/local/bin/python
# Code Fights Weak Numbers Problem
def weakNumbers(n):
def get_divisors(n):
divs = []
for i in range(1, n + 1):
count = 0
for d in range(1, i + 1):
if i % d == 0:
count += 1
divs.append(count)
return divs
divs = get_divisors(n)
w = []
def main():
tests = [
[9, [2, 2]],
[1, [0, 1]],
[2, [0, 2]],
[7, [2, 1]],
[500, [403, 1]],
[4, [0, 4]]
]
for t in tests:
res = weakNumbers(t[0])
ans = t[1]
if ans == res:
print("PASSED: weakNumbers({}) returned {}"
.format(t[0], res))
else:
print("FAILED: weakNumbers({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Set up Code Fights weak numbers problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Weak Numbers Problem
def weakNumbers(n):
def get_divisors(n):
divs = []
for i in range(1, n + 1):
count = 0
for d in range(1, i + 1):
if i % d == 0:
count += 1
divs.append(count)
return divs
divs = get_divisors(n)
w = []
def main():
tests = [
[9, [2, 2]],
[1, [0, 1]],
[2, [0, 2]],
[7, [2, 1]],
[500, [403, 1]],
[4, [0, 4]]
]
for t in tests:
res = weakNumbers(t[0])
ans = t[1]
if ans == res:
print("PASSED: weakNumbers({}) returned {}"
.format(t[0], res))
else:
print("FAILED: weakNumbers({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Set up Code Fights weak numbers problem#!/usr/local/bin/python
# Code Fights Weak Numbers Problem
def weakNumbers(n):
def get_divisors(n):
divs = []
for i in range(1, n + 1):
count = 0
for d in range(1, i + 1):
if i % d == 0:
count += 1
divs.append(count)
return divs
divs = get_divisors(n)
w = []
def main():
tests = [
[9, [2, 2]],
[1, [0, 1]],
[2, [0, 2]],
[7, [2, 1]],
[500, [403, 1]],
[4, [0, 4]]
]
for t in tests:
res = weakNumbers(t[0])
ans = t[1]
if ans == res:
print("PASSED: weakNumbers({}) returned {}"
.format(t[0], res))
else:
print("FAILED: weakNumbers({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Set up Code Fights weak numbers problem<commit_after>#!/usr/local/bin/python
# Code Fights Weak Numbers Problem
def weakNumbers(n):
def get_divisors(n):
divs = []
for i in range(1, n + 1):
count = 0
for d in range(1, i + 1):
if i % d == 0:
count += 1
divs.append(count)
return divs
divs = get_divisors(n)
w = []
def main():
tests = [
[9, [2, 2]],
[1, [0, 1]],
[2, [0, 2]],
[7, [2, 1]],
[500, [403, 1]],
[4, [0, 4]]
]
for t in tests:
res = weakNumbers(t[0])
ans = t[1]
if ans == res:
print("PASSED: weakNumbers({}) returned {}"
.format(t[0], res))
else:
print("FAILED: weakNumbers({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
|
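As recorded, the weak-numbers commit is only a scaffold: weakNumbers builds a divisor-count list, never computes weaknesses, and returns None, so every test case fails. One way to finish it — an illustrative sketch, not part of the commit — is to count, for each number, how many smaller numbers have strictly more divisors, then report the largest such count and how many numbers share it:

def weakNumbers(n):
    # Divisor count for each value 1..n (brute force is fine for these input sizes).
    divs = [sum(1 for d in range(1, i + 1) if i % d == 0) for i in range(1, n + 1)]
    # Weakness of the (i+1)-th number: smaller numbers with strictly more divisors.
    weakness = [sum(1 for j in range(i) if divs[j] > divs[i]) for i in range(n)]
    worst = max(weakness)
    return [worst, weakness.count(worst)]

This reproduces the small cases in the test list, e.g. weakNumbers(9) == [2, 2], weakNumbers(7) == [2, 1] and weakNumbers(4) == [0, 4].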
beb3882b89b41ca104dbb9f2fb97f609f45ce106
|
corehq/apps/users/decorators.py
|
corehq/apps/users/decorators.py
|
from django.http import HttpResponseForbidden
from corehq.apps.domain.decorators import login_and_domain_required
def require_permission(permission, data=None, login_decorator=login_and_domain_required):
try:
permission = permission.name
except AttributeError:
try:
permission = permission.__name__
except AttributeError:
pass
def decorator(view_func):
def _inner(request, domain, *args, **kwargs):
if hasattr(request, "couch_user") and (request.user.is_superuser or request.couch_user.has_permission(domain, permission, data=data)):
if login_decorator:
return login_decorator(view_func)(request, domain, *args, **kwargs)
else:
return view_func(request, domain, *args, **kwargs)
else:
return HttpResponseForbidden()
return _inner
return decorator
|
from django.http import HttpResponseForbidden
from corehq.apps.domain.decorators import login_and_domain_required
def require_permission(permission, data=None, login_decorator=login_and_domain_required):
try:
permission = permission.name
except AttributeError:
try:
permission = permission.__name__
except AttributeError:
pass
def decorator(view_func):
def _inner(request, domain, *args, **kwargs):
if hasattr(request, "couch_user") and (request.user.is_superuser or request.couch_user.has_permission(domain, permission, data=data)):
return view_func(request, domain, *args, **kwargs)
else:
return HttpResponseForbidden()
if login_decorator:
return login_decorator(_inner)
else:
return _inner
return decorator
|
Apply login decorator before permissions check; less 403s, more 302s
|
Apply login decorator before permissions check; less 403s, more 302s
|
Python
|
bsd-3-clause
|
puttarajubr/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,gmimano/commcaretest,SEL-Columbia/commcare-hq,dimagi/commcare-hq,gmimano/commcaretest,SEL-Columbia/commcare-hq,gmimano/commcaretest,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq
|
from django.http import HttpResponseForbidden
from corehq.apps.domain.decorators import login_and_domain_required
def require_permission(permission, data=None, login_decorator=login_and_domain_required):
try:
permission = permission.name
except AttributeError:
try:
permission = permission.__name__
except AttributeError:
pass
def decorator(view_func):
def _inner(request, domain, *args, **kwargs):
if hasattr(request, "couch_user") and (request.user.is_superuser or request.couch_user.has_permission(domain, permission, data=data)):
if login_decorator:
return login_decorator(view_func)(request, domain, *args, **kwargs)
else:
return view_func(request, domain, *args, **kwargs)
else:
return HttpResponseForbidden()
return _inner
return decorator
Apply login decorator before permissions check; less 403s, more 302s
|
from django.http import HttpResponseForbidden
from corehq.apps.domain.decorators import login_and_domain_required
def require_permission(permission, data=None, login_decorator=login_and_domain_required):
try:
permission = permission.name
except AttributeError:
try:
permission = permission.__name__
except AttributeError:
pass
def decorator(view_func):
def _inner(request, domain, *args, **kwargs):
if hasattr(request, "couch_user") and (request.user.is_superuser or request.couch_user.has_permission(domain, permission, data=data)):
return view_func(request, domain, *args, **kwargs)
else:
return HttpResponseForbidden()
if login_decorator:
return login_decorator(_inner)
else:
return _inner
return decorator
|
<commit_before>from django.http import HttpResponseForbidden
from corehq.apps.domain.decorators import login_and_domain_required
def require_permission(permission, data=None, login_decorator=login_and_domain_required):
try:
permission = permission.name
except AttributeError:
try:
permission = permission.__name__
except AttributeError:
pass
def decorator(view_func):
def _inner(request, domain, *args, **kwargs):
if hasattr(request, "couch_user") and (request.user.is_superuser or request.couch_user.has_permission(domain, permission, data=data)):
if login_decorator:
return login_decorator(view_func)(request, domain, *args, **kwargs)
else:
return view_func(request, domain, *args, **kwargs)
else:
return HttpResponseForbidden()
return _inner
return decorator
<commit_msg>Apply login decorator before permissions check; less 403s, more 302s<commit_after>
|
from django.http import HttpResponseForbidden
from corehq.apps.domain.decorators import login_and_domain_required
def require_permission(permission, data=None, login_decorator=login_and_domain_required):
try:
permission = permission.name
except AttributeError:
try:
permission = permission.__name__
except AttributeError:
pass
def decorator(view_func):
def _inner(request, domain, *args, **kwargs):
if hasattr(request, "couch_user") and (request.user.is_superuser or request.couch_user.has_permission(domain, permission, data=data)):
return view_func(request, domain, *args, **kwargs)
else:
return HttpResponseForbidden()
if login_decorator:
return login_decorator(_inner)
else:
return _inner
return decorator
|
from django.http import HttpResponseForbidden
from corehq.apps.domain.decorators import login_and_domain_required
def require_permission(permission, data=None, login_decorator=login_and_domain_required):
try:
permission = permission.name
except AttributeError:
try:
permission = permission.__name__
except AttributeError:
pass
def decorator(view_func):
def _inner(request, domain, *args, **kwargs):
if hasattr(request, "couch_user") and (request.user.is_superuser or request.couch_user.has_permission(domain, permission, data=data)):
if login_decorator:
return login_decorator(view_func)(request, domain, *args, **kwargs)
else:
return view_func(request, domain, *args, **kwargs)
else:
return HttpResponseForbidden()
return _inner
return decorator
Apply login decorator before permissions check; less 403s, more 302sfrom django.http import HttpResponseForbidden
from corehq.apps.domain.decorators import login_and_domain_required
def require_permission(permission, data=None, login_decorator=login_and_domain_required):
try:
permission = permission.name
except AttributeError:
try:
permission = permission.__name__
except AttributeError:
pass
def decorator(view_func):
def _inner(request, domain, *args, **kwargs):
if hasattr(request, "couch_user") and (request.user.is_superuser or request.couch_user.has_permission(domain, permission, data=data)):
return view_func(request, domain, *args, **kwargs)
else:
return HttpResponseForbidden()
if login_decorator:
return login_decorator(_inner)
else:
return _inner
return decorator
|
<commit_before>from django.http import HttpResponseForbidden
from corehq.apps.domain.decorators import login_and_domain_required
def require_permission(permission, data=None, login_decorator=login_and_domain_required):
try:
permission = permission.name
except AttributeError:
try:
permission = permission.__name__
except AttributeError:
pass
def decorator(view_func):
def _inner(request, domain, *args, **kwargs):
if hasattr(request, "couch_user") and (request.user.is_superuser or request.couch_user.has_permission(domain, permission, data=data)):
if login_decorator:
return login_decorator(view_func)(request, domain, *args, **kwargs)
else:
return view_func(request, domain, *args, **kwargs)
else:
return HttpResponseForbidden()
return _inner
return decorator
<commit_msg>Apply login decorator before permissions check; less 403s, more 302s<commit_after>from django.http import HttpResponseForbidden
from corehq.apps.domain.decorators import login_and_domain_required
def require_permission(permission, data=None, login_decorator=login_and_domain_required):
try:
permission = permission.name
except AttributeError:
try:
permission = permission.__name__
except AttributeError:
pass
def decorator(view_func):
def _inner(request, domain, *args, **kwargs):
if hasattr(request, "couch_user") and (request.user.is_superuser or request.couch_user.has_permission(domain, permission, data=data)):
return view_func(request, domain, *args, **kwargs)
else:
return HttpResponseForbidden()
if login_decorator:
return login_decorator(_inner)
else:
return _inner
return decorator
|
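The practical effect of the decorator change above shows up at the view level: the login check now wraps the whole view, so an anonymous request is redirected to the login page (302) by login_and_domain_required before the permission test ever runs, instead of falling through to an HttpResponseForbidden (403). A usage sketch — the view name and permission string are invented for illustration:

from django.http import HttpResponse
from corehq.apps.users.decorators import require_permission

@require_permission('edit-data')
def edit_report(request, domain):
    # Anonymous users get the login redirect; authenticated users without
    # the 'edit-data' permission still receive a 403.
    return HttpResponse('ok')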
743f999217fa2e87abe9d512a3e71d54386fe151
|
carepoint/tests/models/cph/test_phone.py
|
carepoint/tests/models/cph/test_phone.py
|
# -*- coding: utf-8 -*-
# © 2015-TODAY LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import unittest
from sqlalchemy.schema import Table
from carepoint.tests.db.db import DatabaseTest
from carepoint.models.cph.phone import Phone
class TestModelsCphPhone(DatabaseTest):
def test_table_initialization(self, ):
self.assertIsInstance(Phone.__table__, Table)
if __name__ == '__main__':
unittest.main()
|
Add missing phone model test in carepoint cph
|
Add missing phone model test in carepoint cph
|
Python
|
mit
|
laslabs/Python-Carepoint
|
Add missing phone model test in carepoint cph
|
# -*- coding: utf-8 -*-
# © 2015-TODAY LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import unittest
from sqlalchemy.schema import Table
from carepoint.tests.db.db import DatabaseTest
from carepoint.models.cph.phone import Phone
class TestModelsCphPhone(DatabaseTest):
def test_table_initialization(self, ):
self.assertIsInstance(Phone.__table__, Table)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add missing phone model test in carepoint cph<commit_after>
|
# -*- coding: utf-8 -*-
# © 2015-TODAY LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import unittest
from sqlalchemy.schema import Table
from carepoint.tests.db.db import DatabaseTest
from carepoint.models.cph.phone import Phone
class TestModelsCphPhone(DatabaseTest):
def test_table_initialization(self, ):
self.assertIsInstance(Phone.__table__, Table)
if __name__ == '__main__':
unittest.main()
|
Add missing phone model test in carepoint cph# -*- coding: utf-8 -*-
# © 2015-TODAY LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import unittest
from sqlalchemy.schema import Table
from carepoint.tests.db.db import DatabaseTest
from carepoint.models.cph.phone import Phone
class TestModelsCphPhone(DatabaseTest):
def test_table_initialization(self, ):
self.assertIsInstance(Phone.__table__, Table)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add missing phone model test in carepoint cph<commit_after># -*- coding: utf-8 -*-
# © 2015-TODAY LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import unittest
from sqlalchemy.schema import Table
from carepoint.tests.db.db import DatabaseTest
from carepoint.models.cph.phone import Phone
class TestModelsCphPhone(DatabaseTest):
def test_table_initialization(self, ):
self.assertIsInstance(Phone.__table__, Table)
if __name__ == '__main__':
unittest.main()
|
|
eec8b527b54efd516eebfbc056d35158316fc222
|
client/examples/update_image_metadata.py
|
client/examples/update_image_metadata.py
|
# -*- coding: utf-8 -*-
#
# * Copyright (c) 2009-2015. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
__author__ = "Marée Raphaël <raphael.maree@ulg.ac.be>"
__copyright__ = "Copyright 2010-2016 University of Liège, Belgium, http://www.cytomine.be/"
from cytomine import Cytomine
from cytomine.models import *
#Cytomine connection parameters
cytomine_host=""
cytomine_public_key=""
cytomine_private_key=""
#Connection to Cytomine Core
conn = Cytomine(cytomine_host, cytomine_public_key, cytomine_private_key, base_path = '/api/', working_path = '/tmp/', verbose= True)
#Adapt with your parameters
id_project=10529443 #project id
new_magnification=40 #new image magnification
new_resolution=0.65 #new image resolution
#Get image instances from project
image_instances = ImageInstanceCollection()
image_instances.project = id_project
image_instances = conn.fetch(image_instances)
images = image_instances.data()
print "Nb images in project: %d" %len(images)
for image in images:
print image
abstractimage = conn.get_image(image.baseImage)
conn.edit_image(image.baseImage,magnification=new_magnification,resolution=new_resolution)
abstractimage = conn.get_image(image.baseImage)
#print "after: %d" %abstractimage.magnification
|
Update image metadata in a project
|
Update image metadata in a project
|
Python
|
apache-2.0
|
cytomine/Cytomine-python-client,cytomine/Cytomine-python-client
|
Update image metadata in a project
|
# -*- coding: utf-8 -*-
#
# * Copyright (c) 2009-2015. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
__author__ = "Marée Raphaël <raphael.maree@ulg.ac.be>"
__copyright__ = "Copyright 2010-2016 University of Liège, Belgium, http://www.cytomine.be/"
from cytomine import Cytomine
from cytomine.models import *
#Cytomine connection parameters
cytomine_host=""
cytomine_public_key=""
cytomine_private_key=""
#Connection to Cytomine Core
conn = Cytomine(cytomine_host, cytomine_public_key, cytomine_private_key, base_path = '/api/', working_path = '/tmp/', verbose= True)
#Adapt with your parameters
id_project=10529443 #project id
new_magnification=40 #new image magnification
new_resolution=0.65 #new image resolution
#Get image instances from project
image_instances = ImageInstanceCollection()
image_instances.project = id_project
image_instances = conn.fetch(image_instances)
images = image_instances.data()
print "Nb images in project: %d" %len(images)
for image in images:
print image
abstractimage = conn.get_image(image.baseImage)
conn.edit_image(image.baseImage,magnification=new_magnification,resolution=new_resolution)
abstractimage = conn.get_image(image.baseImage)
#print "after: %d" %abstractimage.magnification
|
<commit_before><commit_msg>Update image metadata in a project<commit_after>
|
# -*- coding: utf-8 -*-
#
# * Copyright (c) 2009-2015. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
__author__ = "Marée Raphaël <raphael.maree@ulg.ac.be>"
__copyright__ = "Copyright 2010-2016 University of Liège, Belgium, http://www.cytomine.be/"
from cytomine import Cytomine
from cytomine.models import *
#Cytomine connection parameters
cytomine_host=""
cytomine_public_key=""
cytomine_private_key=""
#Connection to Cytomine Core
conn = Cytomine(cytomine_host, cytomine_public_key, cytomine_private_key, base_path = '/api/', working_path = '/tmp/', verbose= True)
#Adapt with your parameters
id_project=10529443 #project id
new_magnification=40 #new image magnification
new_resolution=0.65 #new image resolution
#Get image instances from project
image_instances = ImageInstanceCollection()
image_instances.project = id_project
image_instances = conn.fetch(image_instances)
images = image_instances.data()
print "Nb images in project: %d" %len(images)
for image in images:
print image
abstractimage = conn.get_image(image.baseImage)
conn.edit_image(image.baseImage,magnification=new_magnification,resolution=new_resolution)
abstractimage = conn.get_image(image.baseImage)
#print "after: %d" %abstractimage.magnification
|
Update image metadata in a project# -*- coding: utf-8 -*-
#
# * Copyright (c) 2009-2015. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
__author__ = "Marée Raphaël <raphael.maree@ulg.ac.be>"
__copyright__ = "Copyright 2010-2016 University of Liège, Belgium, http://www.cytomine.be/"
from cytomine import Cytomine
from cytomine.models import *
#Cytomine connection parameters
cytomine_host=""
cytomine_public_key=""
cytomine_private_key=""
#Connection to Cytomine Core
conn = Cytomine(cytomine_host, cytomine_public_key, cytomine_private_key, base_path = '/api/', working_path = '/tmp/', verbose= True)
#Adapt with your parameters
id_project=10529443 #project id
new_magnification=40 #new image magnification
new_resolution=0.65 #new image resolution
#Get image instances from project
image_instances = ImageInstanceCollection()
image_instances.project = id_project
image_instances = conn.fetch(image_instances)
images = image_instances.data()
print "Nb images in project: %d" %len(images)
for image in images:
print image
abstractimage = conn.get_image(image.baseImage)
conn.edit_image(image.baseImage,magnification=new_magnification,resolution=new_resolution)
abstractimage = conn.get_image(image.baseImage)
#print "after: %d" %abstractimage.magnification
|
<commit_before><commit_msg>Update image metadata in a project<commit_after># -*- coding: utf-8 -*-
#
# * Copyright (c) 2009-2015. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
__author__ = "Marée Raphaël <raphael.maree@ulg.ac.be>"
__copyright__ = "Copyright 2010-2016 University of Liège, Belgium, http://www.cytomine.be/"
from cytomine import Cytomine
from cytomine.models import *
#Cytomine connection parameters
cytomine_host=""
cytomine_public_key=""
cytomine_private_key=""
#Connection to Cytomine Core
conn = Cytomine(cytomine_host, cytomine_public_key, cytomine_private_key, base_path = '/api/', working_path = '/tmp/', verbose= True)
#Adapt with your parameters
id_project=10529443 #project id
new_magnification=40 #new image magnification
new_resolution=0.65 #new image resolution
#Get image instances from project
image_instances = ImageInstanceCollection()
image_instances.project = id_project
image_instances = conn.fetch(image_instances)
images = image_instances.data()
print "Nb images in project: %d" %len(images)
for image in images:
print image
abstractimage = conn.get_image(image.baseImage)
conn.edit_image(image.baseImage,magnification=new_magnification,resolution=new_resolution)
abstractimage = conn.get_image(image.baseImage)
#print "after: %d" %abstractimage.magnification
|
|
35f779cb56e4c710fb5321bd22d8187670993f62
|
sci_lib.py
|
sci_lib.py
|
#!/usr/bin/python
#Author: Scott T. Salesky
#Created: 12.6.2014
#Purpose: Collection of functions, routines to use
#Python for scientific work
#----------------------------------------------
|
Add function to read 3d direct access Fortran binary files into NumPy arrays.
|
Add function to read 3d direct access Fortran binary files into NumPy arrays.
|
Python
|
mit
|
ssalesky/Science-Library
|
Add function to read 3d direct access Fortran binary files into NumPy arrays.
|
#!/usr/bin/python
#Author: Scott T. Salesky
#Created: 12.6.2014
#Purpose: Collection of functions, routines to use
#Python for scientific work
#----------------------------------------------
|
<commit_before><commit_msg>Add function to read 3d direct access Fortran binary files into NumPy arrays.<commit_after>
|
#!/usr/bin/python
#Author: Scott T. Salesky
#Created: 12.6.2014
#Purpose: Collection of functions, routines to use
#Python for scientific work
#----------------------------------------------
|
Add function to read 3d direct access Fortran binary files into NumPy arrays.#!/usr/bin/python
#Author: Scott T. Salesky
#Created: 12.6.2014
#Purpose: Collection of functions, routines to use
#Python for scientific work
#----------------------------------------------
|
<commit_before><commit_msg>Add function to read 3d direct access Fortran binary files into NumPy arrays.<commit_after>#!/usr/bin/python
#Author: Scott T. Salesky
#Created: 12.6.2014
#Purpose: Collection of functions, routines to use
#Python for scientific work
#----------------------------------------------
|
|
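The sci_lib commit message above promises a reader for 3-D direct-access Fortran binaries, but the recorded file contains only the header comment. A sketch of what such a reader typically looks like is below — illustrative only, with an invented function name and the assumption that the file holds raw float64 values with no record markers:

import numpy as np

def read_fortran_binary_3d(fname, nx, ny, nz, dtype=np.float64):
    # Read nx*ny*nz values and reshape in column-major (Fortran) order,
    # so the fastest-varying index in the file becomes the first axis.
    data = np.fromfile(fname, dtype=dtype, count=nx * ny * nz)
    return data.reshape((nx, ny, nz), order='F')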
0eda613c7aafb974b1314c93d749b8ceb31a8459
|
court_bulk_task_creator.py
|
court_bulk_task_creator.py
|
from courtreader import readers
from courtutils.database import Database
from courtutils.logger import get_logger
from datetime import datetime, timedelta
import csv
import pymongo
import os
import sys
import time
def get_db_connection():
return pymongo.MongoClient(os.environ['MONGO_DB'])['va_court_search']
start_date = datetime.strptime(sys.argv[1],'%m/%d/%Y')
end_date = datetime.strptime(sys.argv[2],'%m/%d/%Y')
if start_date < end_date:
    raise ValueError('Start Date must be after End Date so they descend')
courts = list(Database.get_circuit_courts())
tasks = []
for court in courts:
tasks.append({
'court_fips': court['fips_code'],
'start_date': start_date,
'end_date': end_date
})
db = get_db_connection()
db.circuit_court_date_tasks.insert_many(tasks)
|
Add bulk collection task creator
|
Add bulk collection task creator
|
Python
|
mit
|
bschoenfeld/va-court-scraper,bschoenfeld/va-court-scraper
|
Add bulk collection task creator
|
from courtreader import readers
from courtutils.database import Database
from courtutils.logger import get_logger
from datetime import datetime, timedelta
import csv
import pymongo
import os
import sys
import time
def get_db_connection():
return pymongo.MongoClient(os.environ['MONGO_DB'])['va_court_search']
start_date = datetime.strptime(sys.argv[1],'%m/%d/%Y')
end_date = datetime.strptime(sys.argv[2],'%m/%d/%Y')
if start_date < end_date:
    raise ValueError('Start Date must be after End Date so they descend')
courts = list(Database.get_circuit_courts())
tasks = []
for court in courts:
tasks.append({
'court_fips': court['fips_code'],
'start_date': start_date,
'end_date': end_date
})
db = get_db_connection()
db.circuit_court_date_tasks.insert_many(tasks)
|
<commit_before><commit_msg>Add bulk collection task creator<commit_after>
|
from courtreader import readers
from courtutils.database import Database
from courtutils.logger import get_logger
from datetime import datetime, timedelta
import csv
import pymongo
import os
import sys
import time
def get_db_connection():
return pymongo.MongoClient(os.environ['MONGO_DB'])['va_court_search']
start_date = datetime.strptime(sys.argv[1],'%m/%d/%Y')
end_date = datetime.strptime(sys.argv[2],'%m/%d/%Y')
if start_date < end_date:
    raise ValueError('Start Date must be after End Date so they descend')
courts = list(Database.get_circuit_courts())
tasks = []
for court in courts:
tasks.append({
'court_fips': court['fips_code'],
'start_date': start_date,
'end_date': end_date
})
db = get_db_connection()
db.circuit_court_date_tasks.insert_many(tasks)
|
Add bulk collection task creatorfrom courtreader import readers
from courtutils.database import Database
from courtutils.logger import get_logger
from datetime import datetime, timedelta
import csv
import pymongo
import os
import sys
import time
def get_db_connection():
return pymongo.MongoClient(os.environ['MONGO_DB'])['va_court_search']
start_date = datetime.strptime(sys.argv[1],'%m/%d/%Y')
end_date = datetime.strptime(sys.argv[2],'%m/%d/%Y')
if start_date < end_date:
    raise ValueError('Start Date must be after End Date so they descend')
courts = list(Database.get_circuit_courts())
tasks = []
for court in courts:
tasks.append({
'court_fips': court['fips_code'],
'start_date': start_date,
'end_date': end_date
})
db = get_db_connection()
db.circuit_court_date_tasks.insert_many(tasks)
|
<commit_before><commit_msg>Add bulk collection task creator<commit_after>from courtreader import readers
from courtutils.database import Database
from courtutils.logger import get_logger
from datetime import datetime, timedelta
import csv
import pymongo
import os
import sys
import time
def get_db_connection():
return pymongo.MongoClient(os.environ['MONGO_DB'])['va_court_search']
start_date = datetime.strptime(sys.argv[1],'%m/%d/%Y')
end_date = datetime.strptime(sys.argv[2],'%m/%d/%Y')
if start_date < end_date:
    raise ValueError('Start Date must be after End Date so they descend')
courts = list(Database.get_circuit_courts())
tasks = []
for court in courts:
tasks.append({
'court_fips': court['fips_code'],
'start_date': start_date,
'end_date': end_date
})
db = get_db_connection()
db.circuit_court_date_tasks.insert_many(tasks)
|
|
d5ce5e325839d6094d163a58fc417bf4ee2a573f
|
tests/regression/test_f468afef89d1.py
|
tests/regression/test_f468afef89d1.py
|
#!/usr/bin/env python
"""when seconds or nano_seconds was set to 0 rather than None (the default) the
value would not be reset to 0 but instead the previous value would remain in
place
this was only visible if a user had previously set a value
"""
from butter.timerfd import Timer
from select import select
def test_f468afef89dTEST_PERIOD():
TEST_PERIOD = 1
TIMEOUT = TEST_PERIOD * 2
# create a timer
timer = Timer()
# set the timer
timer.offset(seconds=TEST_PERIOD, nano_seconds=TEST_PERIOD)
timer.repeats(seconds=TEST_PERIOD, nano_seconds=TEST_PERIOD)
timer.update()
# ensure it fires
r_fd, _, _ = select([timer], [], [])
# reset and update the timer
timer.offset(seconds=0, nano_seconds=0)
timer.repeats(seconds=0, nano_seconds=0)
timer.update()
# we set this twice to get the value we set the timer to
new_val = timer.update()
assert new_val.next_event == (0, 0), 'Timer offset did not get reset'
assert new_val.period == (0, 0), 'Timer period did not get reset'
# ensure it does not fire
select([timer], [], [], TIMEOUT)
|
Put a regression test in place for commit f468afef89d1
|
Put a regression test in place for commit f468afef89d1
|
Python
|
bsd-3-clause
|
dasSOZO/python-butter,wdv4758h/butter
|
Put a regression test in place for commit f468afef89d1
|
#!/usr/bin/env python
"""when seconds or nano_seconds was set to 0 rather than None (the default) the
value would not be reset to 0 but instead the previous value would remain in
place
this was only visible if a user had previously set a value
"""
from butter.timerfd import Timer
from select import select
def test_f468afef89dTEST_PERIOD():
TEST_PERIOD = 1
TIMEOUT = TEST_PERIOD * 2
# create a timer
timer = Timer()
# set the timer
timer.offset(seconds=TEST_PERIOD, nano_seconds=TEST_PERIOD)
timer.repeats(seconds=TEST_PERIOD, nano_seconds=TEST_PERIOD)
timer.update()
# ensure it fires
r_fd, _, _ = select([timer], [], [])
# reset and update the timer
timer.offset(seconds=0, nano_seconds=0)
timer.repeats(seconds=0, nano_seconds=0)
timer.update()
# we set this twice to get the value we set the timer to
new_val = timer.update()
assert new_val.next_event == (0, 0), 'Timer offset did not get reset'
assert new_val.period == (0, 0), 'Timer period did not get reset'
# ensure it does not fire
select([timer], [], [], TIMEOUT)
|
<commit_before><commit_msg>Put a regression test in place for commit f468afef89d1<commit_after>
|
#!/usr/bin/env python
"""when seconds or nano_seconds was set to 0 rather than None (the default) the
value would not be reset to 0 but instead the previous value would remain in
place
this was only visible if a user had previously set a value
"""
from butter.timerfd import Timer
from select import select
def test_f468afef89dTEST_PERIOD():
TEST_PERIOD = 1
TIMEOUT = TEST_PERIOD * 2
# create a timer
timer = Timer()
# set the timer
timer.offset(seconds=TEST_PERIOD, nano_seconds=TEST_PERIOD)
timer.repeats(seconds=TEST_PERIOD, nano_seconds=TEST_PERIOD)
timer.update()
# ensure it fires
r_fd, _, _ = select([timer], [], [])
# reset and update the timer
timer.offset(seconds=0, nano_seconds=0)
timer.repeats(seconds=0, nano_seconds=0)
timer.update()
# we set this twice to get the value we set the timer to
new_val = timer.update()
assert new_val.next_event == (0, 0), 'Timer offset did not get reset'
assert new_val.period == (0, 0), 'Timer period did not get reset'
# ensure it does not fire
select([timer], [], [], TIMEOUT)
|
Put a regression test in place for commit f468afef89d1#!/usr/bin/env python
"""when seconds or nano_seconds was set to 0 rather than None (the default) the
value would not be reset to 0 but instead the previous value would remain in
place
this was only visible if a user had previously set a value
"""
from butter.timerfd import Timer
from select import select
def test_f468afef89dTEST_PERIOD():
TEST_PERIOD = 1
TIMEOUT = TEST_PERIOD * 2
# create a timer
timer = Timer()
# set the timer
timer.offset(seconds=TEST_PERIOD, nano_seconds=TEST_PERIOD)
timer.repeats(seconds=TEST_PERIOD, nano_seconds=TEST_PERIOD)
timer.update()
# ensure it fires
r_fd, _, _ = select([timer], [], [])
# reset and update the timer
timer.offset(seconds=0, nano_seconds=0)
timer.repeats(seconds=0, nano_seconds=0)
timer.update()
# we set this twice to get the value we set the timer to
new_val = timer.update()
assert new_val.next_event == (0, 0), 'Timer offset did not get reset'
assert new_val.period == (0, 0), 'Timer period did not get reset'
# ensure it does not fire
select([timer], [], [], TIMEOUT)
|
<commit_before><commit_msg>Put a regression test in place for commit f468afef89d1<commit_after>#!/usr/bin/env python
"""when seconds or nano_seconds was set to 0 rather than None (the default) the
value would not be reset to 0 but instead the previous value would remain in
place
this was only visible if a user had previously set a value
"""
from butter.timerfd import Timer
from select import select
def test_f468afef89dTEST_PERIOD():
TEST_PERIOD = 1
TIMEOUT = TEST_PERIOD * 2
# create a timer
timer = Timer()
# set the timer
timer.offset(seconds=TEST_PERIOD, nano_seconds=TEST_PERIOD)
timer.repeats(seconds=TEST_PERIOD, nano_seconds=TEST_PERIOD)
timer.update()
# ensure it fires
r_fd, _, _ = select([timer], [], [])
# reset and update the timer
timer.offset(seconds=0, nano_seconds=0)
timer.repeats(seconds=0, nano_seconds=0)
timer.update()
# we set this twice to get the value we set the timer to
new_val = timer.update()
assert new_val.next_event == (0, 0), 'Timer offset did not get reset'
assert new_val.period == (0, 0), 'Timer period did not get reset'
# ensure it does not fire
select([timer], [], [], TIMEOUT)
|
|
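The bug class that the timerfd regression test above guards against is the classic falsy-zero check: testing `if value:` instead of `if value is not None:` makes an explicit 0 indistinguishable from "argument not given", so a previously set value survives a reset. A minimal illustration (not the actual butter.timerfd code):

PREVIOUS = 5

def buggy_update(value=None, current=PREVIOUS):
    return value if value else current            # 0 is falsy, so the old value survives

def fixed_update(value=None, current=PREVIOUS):
    return value if value is not None else current

assert buggy_update(0) == 5   # the reset is silently ignored
assert fixed_update(0) == 0   # the reset takes effect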
8eab4037cef2490b99758554c3d4a17603f19d75
|
bookmarks/feeds.py
|
bookmarks/feeds.py
|
from atomformat import Feed
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from bookmarks.models import Bookmark
from django.template.defaultfilters import linebreaks, escape, capfirst
from datetime import datetime
ITEMS_PER_FEED = getattr(settings, 'PINAX_ITEMS_PER_FEED', 20)
class BookmarkFeed(Feed):
def item_id(self, bookmark):
return bookmark.url
def item_title(self, bookmark):
return bookmark.description
def item_updated(self, bookmark):
return bookmark.added
def item_published(self, bookmark):
return bookmark.added
def item_content(self, bookmark):
return {"type" : "html", }, linebreaks(escape(bookmark.note))
def item_links(self, bookmark):
return [{"href" : self.item_id(bookmark)}]
def item_authors(self, bookmark):
return [{"name" : bookmark.adder.username}]
def feed_id(self):
return 'http://%s/feeds/bookmarks/' % Site.objects.get_current().domain
def feed_title(self):
return 'Bookmark Feed'
def feed_updated(self):
qs = Bookmark.objects.all()
# We return an arbitrary date if there are no results, because there
# must be a feed_updated field as per the Atom specifications, however
# there is no real data to go by, and an arbitrary date can be static.
if qs.count() == 0:
return datetime(year=2008, month=7, day=1)
return qs.latest('added').added
def feed_links(self):
absolute_url = reverse('bookmarks.views.bookmarks')
complete_url = "http://%s%s" % (
Site.objects.get_current().domain,
absolute_url,
)
return ({'href': complete_url},)
def items(self):
return Bookmark.objects.order_by("-added")[:ITEMS_PER_FEED]
|
Add feed support for bookmarks by ericfo
|
Add feed support for bookmarks by ericfo
git-svn-id: e32780fd4e06e3e07c0119d454e1aebcae894468@13 413268e4-d34f-0410-bd0d-61523dc7b0b6
|
Python
|
mit
|
incuna/incuna-bookmarks,pinax/django-bookmarks,incuna/incuna-bookmarks
|
Add feed support for bookmarks by ericfo
git-svn-id: e32780fd4e06e3e07c0119d454e1aebcae894468@13 413268e4-d34f-0410-bd0d-61523dc7b0b6
|
from atomformat import Feed
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from bookmarks.models import Bookmark
from django.template.defaultfilters import linebreaks, escape, capfirst
from datetime import datetime
ITEMS_PER_FEED = getattr(settings, 'PINAX_ITEMS_PER_FEED', 20)
class BookmarkFeed(Feed):
def item_id(self, bookmark):
return bookmark.url
def item_title(self, bookmark):
return bookmark.description
def item_updated(self, bookmark):
return bookmark.added
def item_published(self, bookmark):
return bookmark.added
def item_content(self, bookmark):
return {"type" : "html", }, linebreaks(escape(bookmark.note))
def item_links(self, bookmark):
return [{"href" : self.item_id(bookmark)}]
def item_authors(self, bookmark):
return [{"name" : bookmark.adder.username}]
def feed_id(self):
return 'http://%s/feeds/bookmarks/' % Site.objects.get_current().domain
def feed_title(self):
return 'Bookmark Feed'
def feed_updated(self):
qs = Bookmark.objects.all()
# We return an arbitrary date if there are no results, because there
# must be a feed_updated field as per the Atom specifications, however
# there is no real data to go by, and an arbitrary date can be static.
if qs.count() == 0:
return datetime(year=2008, month=7, day=1)
return qs.latest('added').added
def feed_links(self):
absolute_url = reverse('bookmarks.views.bookmarks')
complete_url = "http://%s%s" % (
Site.objects.get_current().domain,
absolute_url,
)
return ({'href': complete_url},)
def items(self):
return Bookmark.objects.order_by("-added")[:ITEMS_PER_FEED]
|
<commit_before><commit_msg>Add feed support for bookmarks by ericfo
git-svn-id: e32780fd4e06e3e07c0119d454e1aebcae894468@13 413268e4-d34f-0410-bd0d-61523dc7b0b6<commit_after>
|
from atomformat import Feed
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from bookmarks.models import Bookmark
from django.template.defaultfilters import linebreaks, escape, capfirst
from datetime import datetime
ITEMS_PER_FEED = getattr(settings, 'PINAX_ITEMS_PER_FEED', 20)
class BookmarkFeed(Feed):
def item_id(self, bookmark):
return bookmark.url
def item_title(self, bookmark):
return bookmark.description
def item_updated(self, bookmark):
return bookmark.added
def item_published(self, bookmark):
return bookmark.added
def item_content(self, bookmark):
return {"type" : "html", }, linebreaks(escape(bookmark.note))
def item_links(self, bookmark):
return [{"href" : self.item_id(bookmark)}]
def item_authors(self, bookmark):
return [{"name" : bookmark.adder.username}]
def feed_id(self):
return 'http://%s/feeds/bookmarks/' % Site.objects.get_current().domain
def feed_title(self):
return 'Bookmark Feed'
def feed_updated(self):
qs = Bookmark.objects.all()
# We return an arbitrary date if there are no results, because there
# must be a feed_updated field as per the Atom specifications, however
# there is no real data to go by, and an arbitrary date can be static.
if qs.count() == 0:
return datetime(year=2008, month=7, day=1)
return qs.latest('added').added
def feed_links(self):
absolute_url = reverse('bookmarks.views.bookmarks')
complete_url = "http://%s%s" % (
Site.objects.get_current().domain,
absolute_url,
)
return ({'href': complete_url},)
def items(self):
return Bookmark.objects.order_by("-added")[:ITEMS_PER_FEED]
|
Add feed support for bookmarks by ericfo
git-svn-id: e32780fd4e06e3e07c0119d454e1aebcae894468@13 413268e4-d34f-0410-bd0d-61523dc7b0b6from atomformat import Feed
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from bookmarks.models import Bookmark
from django.template.defaultfilters import linebreaks, escape, capfirst
from datetime import datetime
ITEMS_PER_FEED = getattr(settings, 'PINAX_ITEMS_PER_FEED', 20)
class BookmarkFeed(Feed):
def item_id(self, bookmark):
return bookmark.url
def item_title(self, bookmark):
return bookmark.description
def item_updated(self, bookmark):
return bookmark.added
def item_published(self, bookmark):
return bookmark.added
def item_content(self, bookmark):
return {"type" : "html", }, linebreaks(escape(bookmark.note))
def item_links(self, bookmark):
return [{"href" : self.item_id(bookmark)}]
def item_authors(self, bookmark):
return [{"name" : bookmark.adder.username}]
def feed_id(self):
return 'http://%s/feeds/bookmarks/' % Site.objects.get_current().domain
def feed_title(self):
return 'Bookmark Feed'
def feed_updated(self):
qs = Bookmark.objects.all()
# We return an arbitrary date if there are no results, because there
# must be a feed_updated field as per the Atom specifications, however
# there is no real data to go by, and an arbitrary date can be static.
if qs.count() == 0:
return datetime(year=2008, month=7, day=1)
return qs.latest('added').added
def feed_links(self):
absolute_url = reverse('bookmarks.views.bookmarks')
complete_url = "http://%s%s" % (
Site.objects.get_current().domain,
absolute_url,
)
return ({'href': complete_url},)
def items(self):
return Bookmark.objects.order_by("-added")[:ITEMS_PER_FEED]
|
<commit_before><commit_msg>Add feed support for bookmarks by ericfo
git-svn-id: e32780fd4e06e3e07c0119d454e1aebcae894468@13 413268e4-d34f-0410-bd0d-61523dc7b0b6<commit_after>from atomformat import Feed
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from bookmarks.models import Bookmark
from django.template.defaultfilters import linebreaks, escape, capfirst
from datetime import datetime
ITEMS_PER_FEED = getattr(settings, 'PINAX_ITEMS_PER_FEED', 20)
class BookmarkFeed(Feed):
def item_id(self, bookmark):
return bookmark.url
def item_title(self, bookmark):
return bookmark.description
def item_updated(self, bookmark):
return bookmark.added
def item_published(self, bookmark):
return bookmark.added
def item_content(self, bookmark):
return {"type" : "html", }, linebreaks(escape(bookmark.note))
def item_links(self, bookmark):
return [{"href" : self.item_id(bookmark)}]
def item_authors(self, bookmark):
return [{"name" : bookmark.adder.username}]
def feed_id(self):
return 'http://%s/feeds/bookmarks/' % Site.objects.get_current().domain
def feed_title(self):
return 'Bookmark Feed'
def feed_updated(self):
qs = Bookmark.objects.all()
# We return an arbitrary date if there are no results, because there
# must be a feed_updated field as per the Atom specifications, however
# there is no real data to go by, and an arbitrary date can be static.
if qs.count() == 0:
return datetime(year=2008, month=7, day=1)
return qs.latest('added').added
def feed_links(self):
absolute_url = reverse('bookmarks.views.bookmarks')
complete_url = "http://%s%s" % (
Site.objects.get_current().domain,
absolute_url,
)
return ({'href': complete_url},)
def items(self):
return Bookmark.objects.order_by("-added")[:ITEMS_PER_FEED]
|
|
07f46255120fd2dc6c33e2c6ed8b215fce9f01c5
|
microdrop/bin/create_portable_config.py
|
microdrop/bin/create_portable_config.py
|
import os
import sys
import pkg_resources
from path_helpers import path
import jinja2
config_template = '''
data_dir = .
[plugins]
# directory containing microdrop plugins
directory = plugins
[microdrop.gui.experiment_log_controller]
notebook_directory = notebooks
[microdrop.gui.dmf_device_controller]
device_directory = devices
'''
launcher_template = '''
cd {{ working_dir }}
{{ py_exe }} -m microdrop.microdrop -c {{ config_path }}
'''
def parse_args(args=None):
'''Parses arguments, returns (options, args).'''
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='Create portable MicroDrop settings '
'directory.')
parser.add_argument('output_dir', type=path)
args = parser.parse_args()
return args
def main(output_dir):
output_dir = path(output_dir)
if not output_dir.isdir():
output_dir.makedirs_p()
elif list(output_dir.files()):
raise IOError('Output directory exists and is not empty.')
config_path = output_dir.joinpath('microdrop.ini')
with config_path.open('wb') as output:
template = jinja2.Template(config_template)
config_str = template.render(output_dir=output_dir.name)
output.write(config_str)
py_exe = path(sys.executable).abspath()
launcher_path = output_dir.joinpath('microdrop.bat')
with launcher_path.open('wb') as output:
template = jinja2.Template(launcher_template)
launcher_str = template.render(working_dir=output_dir.abspath(),
py_exe=py_exe,
config_path=config_path.abspath())
output.write(launcher_str)
print 'Start MicroDrop with the following:'
print '\n %s' % launcher_path.abspath()
if __name__ == '__main__':
args = parse_args()
main(args.output_dir)
|
Add script to create init portable settings
|
[NB] Add script to create init portable settings
Usage:
python -m microdrop.bin.create_portable_config [-h] output_dir
Create portable MicroDrop settings directory.
positional arguments:
output_dir
optional arguments:
-h, --help show this help message and exit
|
Python
|
bsd-3-clause
|
wheeler-microfluidics/microdrop
|
[NB] Add script to create init portable settings
Usage:
python -m microdrop.bin.create_portable_config [-h] output_dir
Create portable MicroDrop settings directory.
positional arguments:
output_dir
optional arguments:
-h, --help show this help message and exit
|
import os
import sys
import pkg_resources
from path_helpers import path
import jinja2
config_template = '''
data_dir = .
[plugins]
# directory containing microdrop plugins
directory = plugins
[microdrop.gui.experiment_log_controller]
notebook_directory = notebooks
[microdrop.gui.dmf_device_controller]
device_directory = devices
'''
launcher_template = '''
cd {{ working_dir }}
{{ py_exe }} -m microdrop.microdrop -c {{ config_path }}
'''
def parse_args(args=None):
'''Parses arguments, returns (options, args).'''
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='Create portable MicroDrop settings '
'directory.')
parser.add_argument('output_dir', type=path)
args = parser.parse_args()
return args
def main(output_dir):
output_dir = path(output_dir)
if not output_dir.isdir():
output_dir.makedirs_p()
elif list(output_dir.files()):
raise IOError('Output directory exists and is not empty.')
config_path = output_dir.joinpath('microdrop.ini')
with config_path.open('wb') as output:
template = jinja2.Template(config_template)
config_str = template.render(output_dir=output_dir.name)
output.write(config_str)
py_exe = path(sys.executable).abspath()
launcher_path = output_dir.joinpath('microdrop.bat')
with launcher_path.open('wb') as output:
template = jinja2.Template(launcher_template)
launcher_str = template.render(working_dir=output_dir.abspath(),
py_exe=py_exe,
config_path=config_path.abspath())
output.write(launcher_str)
print 'Start MicroDrop with the following:'
print '\n %s' % launcher_path.abspath()
if __name__ == '__main__':
args = parse_args()
main(args.output_dir)
|
<commit_before><commit_msg>[NB] Add script to create init portable settings
Usage:
python -m microdrop.bin.create_portable_config [-h] output_dir
Create portable MicroDrop settings directory.
positional arguments:
output_dir
optional arguments:
-h, --help show this help message and exit<commit_after>
|
import os
import sys
import pkg_resources
from path_helpers import path
import jinja2
config_template = '''
data_dir = .
[plugins]
# directory containing microdrop plugins
directory = plugins
[microdrop.gui.experiment_log_controller]
notebook_directory = notebooks
[microdrop.gui.dmf_device_controller]
device_directory = devices
'''
launcher_template = '''
cd {{ working_dir }}
{{ py_exe }} -m microdrop.microdrop -c {{ config_path }}
'''
def parse_args(args=None):
'''Parses arguments, returns (options, args).'''
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='Create portable MicroDrop settings '
'directory.')
parser.add_argument('output_dir', type=path)
args = parser.parse_args()
return args
def main(output_dir):
output_dir = path(output_dir)
if not output_dir.isdir():
output_dir.makedirs_p()
elif list(output_dir.files()):
raise IOError('Output directory exists and is not empty.')
config_path = output_dir.joinpath('microdrop.ini')
with config_path.open('wb') as output:
template = jinja2.Template(config_template)
config_str = template.render(output_dir=output_dir.name)
output.write(config_str)
py_exe = path(sys.executable).abspath()
launcher_path = output_dir.joinpath('microdrop.bat')
with launcher_path.open('wb') as output:
template = jinja2.Template(launcher_template)
launcher_str = template.render(working_dir=output_dir.abspath(),
py_exe=py_exe,
config_path=config_path.abspath())
output.write(launcher_str)
print 'Start MicroDrop with the following:'
print '\n %s' % launcher_path.abspath()
if __name__ == '__main__':
args = parse_args()
main(args.output_dir)
|
[NB] Add script to create init portable settings
Usage:
python -m microdrop.bin.create_portable_config [-h] output_dir
Create portable MicroDrop settings directory.
positional arguments:
output_dir
optional arguments:
-h, --help show this help message and exitimport os
import sys
import pkg_resources
from path_helpers import path
import jinja2
config_template = '''
data_dir = .
[plugins]
# directory containing microdrop plugins
directory = plugins
[microdrop.gui.experiment_log_controller]
notebook_directory = notebooks
[microdrop.gui.dmf_device_controller]
device_directory = devices
'''
launcher_template = '''
cd {{ working_dir }}
{{ py_exe }} -m microdrop.microdrop -c {{ config_path }}
'''
def parse_args(args=None):
'''Parses arguments, returns (options, args).'''
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='Create portable MicroDrop settings '
'directory.')
parser.add_argument('output_dir', type=path)
args = parser.parse_args()
return args
def main(output_dir):
output_dir = path(output_dir)
if not output_dir.isdir():
output_dir.makedirs_p()
elif list(output_dir.files()):
raise IOError('Output directory exists and is not empty.')
config_path = output_dir.joinpath('microdrop.ini')
with config_path.open('wb') as output:
template = jinja2.Template(config_template)
config_str = template.render(output_dir=output_dir.name)
output.write(config_str)
py_exe = path(sys.executable).abspath()
launcher_path = output_dir.joinpath('microdrop.bat')
with launcher_path.open('wb') as output:
template = jinja2.Template(launcher_template)
launcher_str = template.render(working_dir=output_dir.abspath(),
py_exe=py_exe,
config_path=config_path.abspath())
output.write(launcher_str)
print 'Start MicroDrop with the following:'
print '\n %s' % launcher_path.abspath()
if __name__ == '__main__':
args = parse_args()
main(args.output_dir)
|
<commit_before><commit_msg>[NB] Add script to create init portable settings
Usage:
python -m microdrop.bin.create_portable_config [-h] output_dir
Create portable MicroDrop settings directory.
positional arguments:
output_dir
optional arguments:
-h, --help show this help message and exit<commit_after>import os
import sys
import pkg_resources
from path_helpers import path
import jinja2
config_template = '''
data_dir = .
[plugins]
# directory containing microdrop plugins
directory = plugins
[microdrop.gui.experiment_log_controller]
notebook_directory = notebooks
[microdrop.gui.dmf_device_controller]
device_directory = devices
'''
launcher_template = '''
cd {{ working_dir }}
{{ py_exe }} -m microdrop.microdrop -c {{ config_path }}
'''
def parse_args(args=None):
'''Parses arguments, returns (options, args).'''
from argparse import ArgumentParser
if args is None:
args = sys.argv
parser = ArgumentParser(description='Create portable MicroDrop settings '
'directory.')
parser.add_argument('output_dir', type=path)
args = parser.parse_args()
return args
def main(output_dir):
output_dir = path(output_dir)
if not output_dir.isdir():
output_dir.makedirs_p()
elif list(output_dir.files()):
raise IOError('Output directory exists and is not empty.')
config_path = output_dir.joinpath('microdrop.ini')
with config_path.open('wb') as output:
template = jinja2.Template(config_template)
config_str = template.render(output_dir=output_dir.name)
output.write(config_str)
py_exe = path(sys.executable).abspath()
launcher_path = output_dir.joinpath('microdrop.bat')
with launcher_path.open('wb') as output:
template = jinja2.Template(launcher_template)
launcher_str = template.render(working_dir=output_dir.abspath(),
py_exe=py_exe,
config_path=config_path.abspath())
output.write(launcher_str)
print 'Start MicroDrop with the following:'
print '\n %s' % launcher_path.abspath()
if __name__ == '__main__':
args = parse_args()
main(args.output_dir)
|
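A side note on the record above: the script's core step is just rendering two small jinja2 templates to text. Below is a minimal, standalone sketch of that rendering step; the paths are assumed placeholders and the snippet is illustrative only, not part of the original commit.

# Illustrative sketch only: render a launcher template the way the script
# above does, with assumed placeholder paths.
import sys
import jinja2

launcher_template = '''
cd {{ working_dir }}
{{ py_exe }} -m microdrop.microdrop -c {{ config_path }}
'''

print(jinja2.Template(launcher_template).render(
    working_dir='/tmp/microdrop-portable',                 # assumed
    py_exe=sys.executable,
    config_path='/tmp/microdrop-portable/microdrop.ini',   # assumed
))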
|
ee0abfe77086737ad4471d09e5f55536874132ab
|
ideascube/conf/idb_fra_grandesynthe.py
|
ideascube/conf/idb_fra_grandesynthe.py
|
# -*- coding: utf-8 -*-
"""Ideaxbox for Grande Synthe, France"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Grande-Synthe"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['FR']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['ku', 'ar', 'fa', 'fr']
},
{
'id' : 'dirtybiology',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'wiktionary',
'languages': ['fr', 'fa', 'ar', 'ku']
},
{
'id': 'wikiversity',
'languages': ['fr', 'ar', 'fa']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'lesfondamentaux',
},
{
'id': 'bilconference',
},
{
'id': 'icd10',
},
{
'id': 'mullahpiaz',
},
{
'id': 'w2eu',
}
]
|
Add config file for Grande Synthe France
|
Add config file for Grande Synthe France
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add config file for Grande Synthe France
|
# -*- coding: utf-8 -*-
"""Ideaxbox for Grande Synthe, France"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Grande-Synthe"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['FR']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['ku', 'ar', 'fa', 'fr']
},
{
'id' : 'dirtybiology',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'wiktionary',
'languages': ['fr', 'fa', 'ar', 'ku']
},
{
'id': 'wikiversity',
'languages': ['fr', 'ar', 'fa']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'lesfondamentaux',
},
{
'id': 'bilconference',
},
{
'id': 'icd10',
},
{
'id': 'mullahpiaz',
},
{
'id': 'w2eu',
}
]
|
<commit_before><commit_msg>Add config file for Grande Synthe France<commit_after>
|
# -*- coding: utf-8 -*-
"""Ideaxbox for Grande Synthe, France"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Grande-Synthe"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['FR']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['ku', 'ar', 'fa', 'fr']
},
{
'id' : 'dirtybiology',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'wiktionary',
'languages': ['fr', 'fa', 'ar', 'ku']
},
{
'id': 'wikiversity',
'languages': ['fr', 'ar', 'fa']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'lesfondamentaux',
},
{
'id': 'bilconference',
},
{
'id': 'icd10',
},
{
'id': 'mullahpiaz',
},
{
'id': 'w2eu',
}
]
|
Add config file for Grande Synthe France# -*- coding: utf-8 -*-
"""Ideaxbox for Grande Synthe, France"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Grande-Synthe"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['FR']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['ku', 'ar', 'fa', 'fr']
},
{
'id' : 'dirtybiology',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'wiktionary',
'languages': ['fr', 'fa', 'ar', 'ku']
},
{
'id': 'wikiversity',
'languages': ['fr', 'ar', 'fa']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'lesfondamentaux',
},
{
'id': 'bilconference',
},
{
'id': 'icd10',
},
{
'id': 'mullahpiaz',
},
{
'id': 'w2eu',
}
]
|
<commit_before><commit_msg>Add config file for Grande Synthe France<commit_after># -*- coding: utf-8 -*-
"""Ideaxbox for Grande Synthe, France"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Grande-Synthe"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['FR']
TIME_ZONE = None
LANGUAGE_CODE = 'fr'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['ku', 'ar', 'fa', 'fr']
},
{
'id' : 'dirtybiology',
'languages': ['fr']
},
{
'id': 'cest-pas-sorcier',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'wiktionary',
'languages': ['fr', 'fa', 'ar', 'ku']
},
{
'id': 'wikiversity',
'languages': ['fr', 'ar', 'fa']
},
{
'id': 'universcience',
'languages': ['fr']
},
{
'id': 'lesfondamentaux',
},
{
'id': 'bilconference',
},
{
'id': 'icd10',
},
{
'id': 'mullahpiaz',
},
{
'id': 'w2eu',
}
]
|
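A small, hedged sanity check one could run over a HOME_CARDS-style list like the one above; the two sample cards are copied from the record, while the expected shape being checked is an assumption, not something the ideascube project ships.

# Hedged sketch: verify each card has a non-empty 'id' and that any
# 'languages' entry is a list. The expected card shape is assumed here.
HOME_CARDS = [
    {'id': 'wikipedia', 'languages': ['ku', 'ar', 'fa', 'fr']},
    {'id': 'khanacademy'},
]
for card in HOME_CARDS:
    assert card.get('id'), card
    assert isinstance(card.get('languages', []), list), card
print('all cards look well-formed')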
|
f43a44f56ff965e772c2e8de0a7b485da05065a2
|
ovp_projects/tests/test_serializers.py
|
ovp_projects/tests/test_serializers.py
|
from django.test import TestCase
from django.test import RequestFactory
from ovp_projects.models import Project
from ovp_projects.serializers.project import ProjectRetrieveSerializer
from ovp_projects.serializers.project import ProjectSearchSerializer
from ovp_users.models import User
from ovp_core.models import GoogleAddress
from ovp_organizations.models import Organization
class HiddenAddressTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(email="test_user@gmail.com", password="test")
self.second_user = User.objects.create_user(email="test_second_user@test.com", password="test")
self.third_user = User.objects.create_user(email="test_third_user@test.com", password="test")
organization = Organization(name="test", type=0, owner=self.user)
organization.save()
organization.members.add(self.second_user)
address = GoogleAddress(typed_address="Rua. Teçaindá, 81")
address.save()
self.project = Project(name="test project", slug="test slug", details="abc", description="abc", owner=self.user, hidden_address=True, address=address, organization=organization)
self.project.save()
self.request = RequestFactory().get('/')
def test_project_search_serializer_hides_address(self):
""" Assert ProjectSearchSerializer hides adress if Project.hidden_address == True """
self._assert_serializer_hides_address(ProjectSearchSerializer)
def test_project_retrieve_serializer_hides_address(self):
""" Assert ProjectRetrieveSerializer hides adress if Project.hidden_address == True """
self._assert_serializer_hides_address(ProjectRetrieveSerializer)
def _assert_serializer_hides_address(self, serializer_class):
# Owner
self.request.user = self.user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"]["typed_address"] == "Rua. Teçaindá, 81")
self.assertTrue(serializer.data["hidden_address"] == True)
# Organization member
self.request.user = self.second_user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"]["typed_address"] == "Rua. Teçaindá, 81")
self.assertTrue(serializer.data["hidden_address"] == True)
# Non member
self.request.user = self.third_user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"] == None)
self.assertTrue(serializer.data["hidden_address"] == True)
|
Add tests for hidden address on ProjectRetrieveSerializer and ProjectSearchSerializer
|
Add tests for hidden address on ProjectRetrieveSerializer and ProjectSearchSerializer
|
Python
|
agpl-3.0
|
OpenVolunteeringPlatform/django-ovp-projects,OpenVolunteeringPlatform/django-ovp-projects
|
Add tests for hidden address on ProjectRetrieveSerializer and ProjectSearchSerializer
|
from django.test import TestCase
from django.test import RequestFactory
from ovp_projects.models import Project
from ovp_projects.serializers.project import ProjectRetrieveSerializer
from ovp_projects.serializers.project import ProjectSearchSerializer
from ovp_users.models import User
from ovp_core.models import GoogleAddress
from ovp_organizations.models import Organization
class HiddenAddressTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(email="test_user@gmail.com", password="test")
self.second_user = User.objects.create_user(email="test_second_user@test.com", password="test")
self.third_user = User.objects.create_user(email="test_third_user@test.com", password="test")
organization = Organization(name="test", type=0, owner=self.user)
organization.save()
organization.members.add(self.second_user)
address = GoogleAddress(typed_address="Rua. Teçaindá, 81")
address.save()
self.project = Project(name="test project", slug="test slug", details="abc", description="abc", owner=self.user, hidden_address=True, address=address, organization=organization)
self.project.save()
self.request = RequestFactory().get('/')
def test_project_search_serializer_hides_address(self):
""" Assert ProjectSearchSerializer hides adress if Project.hidden_address == True """
self._assert_serializer_hides_address(ProjectSearchSerializer)
def test_project_retrieve_serializer_hides_address(self):
""" Assert ProjectRetrieveSerializer hides adress if Project.hidden_address == True """
self._assert_serializer_hides_address(ProjectRetrieveSerializer)
def _assert_serializer_hides_address(self, serializer_class):
# Owner
self.request.user = self.user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"]["typed_address"] == "Rua. Teçaindá, 81")
self.assertTrue(serializer.data["hidden_address"] == True)
# Organization member
self.request.user = self.second_user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"]["typed_address"] == "Rua. Teçaindá, 81")
self.assertTrue(serializer.data["hidden_address"] == True)
# Non member
self.request.user = self.third_user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"] == None)
self.assertTrue(serializer.data["hidden_address"] == True)
|
<commit_before><commit_msg>Add tests for hidden address on ProjectRetrieveSerializer and ProjectSearchSerializer<commit_after>
|
from django.test import TestCase
from django.test import RequestFactory
from ovp_projects.models import Project
from ovp_projects.serializers.project import ProjectRetrieveSerializer
from ovp_projects.serializers.project import ProjectSearchSerializer
from ovp_users.models import User
from ovp_core.models import GoogleAddress
from ovp_organizations.models import Organization
class HiddenAddressTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(email="test_user@gmail.com", password="test")
self.second_user = User.objects.create_user(email="test_second_user@test.com", password="test")
self.third_user = User.objects.create_user(email="test_third_user@test.com", password="test")
organization = Organization(name="test", type=0, owner=self.user)
organization.save()
organization.members.add(self.second_user)
address = GoogleAddress(typed_address="Rua. Teçaindá, 81")
address.save()
self.project = Project(name="test project", slug="test slug", details="abc", description="abc", owner=self.user, hidden_address=True, address=address, organization=organization)
self.project.save()
self.request = RequestFactory().get('/')
def test_project_search_serializer_hides_address(self):
""" Assert ProjectSearchSerializer hides adress if Project.hidden_address == True """
self._assert_serializer_hides_address(ProjectSearchSerializer)
def test_project_retrieve_serializer_hides_address(self):
""" Assert ProjectRetrieveSerializer hides adress if Project.hidden_address == True """
self._assert_serializer_hides_address(ProjectRetrieveSerializer)
def _assert_serializer_hides_address(self, serializer_class):
# Owner
self.request.user = self.user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"]["typed_address"] == "Rua. Teçaindá, 81")
self.assertTrue(serializer.data["hidden_address"] == True)
# Organization member
self.request.user = self.second_user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"]["typed_address"] == "Rua. Teçaindá, 81")
self.assertTrue(serializer.data["hidden_address"] == True)
# Non member
self.request.user = self.third_user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"] == None)
self.assertTrue(serializer.data["hidden_address"] == True)
|
Add tests for hidden address on ProjectRetrieveSerializer and ProjectSearchSerializerfrom django.test import TestCase
from django.test import RequestFactory
from ovp_projects.models import Project
from ovp_projects.serializers.project import ProjectRetrieveSerializer
from ovp_projects.serializers.project import ProjectSearchSerializer
from ovp_users.models import User
from ovp_core.models import GoogleAddress
from ovp_organizations.models import Organization
class HiddenAddressTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(email="test_user@gmail.com", password="test")
self.second_user = User.objects.create_user(email="test_second_user@test.com", password="test")
self.third_user = User.objects.create_user(email="test_third_user@test.com", password="test")
organization = Organization(name="test", type=0, owner=self.user)
organization.save()
organization.members.add(self.second_user)
address = GoogleAddress(typed_address="Rua. Teçaindá, 81")
address.save()
self.project = Project(name="test project", slug="test slug", details="abc", description="abc", owner=self.user, hidden_address=True, address=address, organization=organization)
self.project.save()
self.request = RequestFactory().get('/')
def test_project_search_serializer_hides_address(self):
""" Assert ProjectSearchSerializer hides adress if Project.hidden_address == True """
self._assert_serializer_hides_address(ProjectSearchSerializer)
def test_project_retrieve_serializer_hides_address(self):
""" Assert ProjectRetrieveSerializer hides adress if Project.hidden_address == True """
self._assert_serializer_hides_address(ProjectRetrieveSerializer)
def _assert_serializer_hides_address(self, serializer_class):
# Owner
self.request.user = self.user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"]["typed_address"] == "Rua. Teçaindá, 81")
self.assertTrue(serializer.data["hidden_address"] == True)
# Organization member
self.request.user = self.second_user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"]["typed_address"] == "Rua. Teçaindá, 81")
self.assertTrue(serializer.data["hidden_address"] == True)
# Non member
self.request.user = self.third_user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"] == None)
self.assertTrue(serializer.data["hidden_address"] == True)
|
<commit_before><commit_msg>Add tests for hidden address on ProjectRetrieveSerializer and ProjectSearchSerializer<commit_after>from django.test import TestCase
from django.test import RequestFactory
from ovp_projects.models import Project
from ovp_projects.serializers.project import ProjectRetrieveSerializer
from ovp_projects.serializers.project import ProjectSearchSerializer
from ovp_users.models import User
from ovp_core.models import GoogleAddress
from ovp_organizations.models import Organization
class HiddenAddressTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(email="test_user@gmail.com", password="test")
self.second_user = User.objects.create_user(email="test_second_user@test.com", password="test")
self.third_user = User.objects.create_user(email="test_third_user@test.com", password="test")
organization = Organization(name="test", type=0, owner=self.user)
organization.save()
organization.members.add(self.second_user)
address = GoogleAddress(typed_address="Rua. Teçaindá, 81")
address.save()
self.project = Project(name="test project", slug="test slug", details="abc", description="abc", owner=self.user, hidden_address=True, address=address, organization=organization)
self.project.save()
self.request = RequestFactory().get('/')
def test_project_search_serializer_hides_address(self):
""" Assert ProjectSearchSerializer hides adress if Project.hidden_address == True """
self._assert_serializer_hides_address(ProjectSearchSerializer)
def test_project_retrieve_serializer_hides_address(self):
""" Assert ProjectRetrieveSerializer hides adress if Project.hidden_address == True """
self._assert_serializer_hides_address(ProjectRetrieveSerializer)
def _assert_serializer_hides_address(self, serializer_class):
# Owner
self.request.user = self.user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"]["typed_address"] == "Rua. Teçaindá, 81")
self.assertTrue(serializer.data["hidden_address"] == True)
# Organization member
self.request.user = self.second_user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"]["typed_address"] == "Rua. Teçaindá, 81")
self.assertTrue(serializer.data["hidden_address"] == True)
# Non member
self.request.user = self.third_user
serializer = serializer_class(self.project, context={"request": self.request})
self.assertTrue(serializer.data["address"] == None)
self.assertTrue(serializer.data["hidden_address"] == True)
|
|
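The rule those tests pin down can be stated framework-free. Below is a hedged sketch of that visibility rule using toy objects and invented attribute names; it is not the serializer code from ovp_projects.

# Hedged sketch of the visibility rule exercised above, using toy objects.
# Attribute names are invented for illustration only.
from types import SimpleNamespace as NS

def visible_address(project, user):
    """Return the address dict, or None when it must stay hidden."""
    allowed = (not project.hidden_address
               or user is project.owner
               or user in project.organization_members)
    return {'typed_address': project.typed_address} if allowed else None

owner, member, stranger = NS(name='owner'), NS(name='member'), NS(name='stranger')
project = NS(hidden_address=True, owner=owner,
             organization_members=[member],
             typed_address='Rua. Teçaindá, 81')
assert visible_address(project, owner) is not None
assert visible_address(project, member) is not None
assert visible_address(project, stranger) is None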
985e9167bdfa857c5148ee0431fdd13d50ba787e
|
viewflow/migrations/0002_fsmchange.py
|
viewflow/migrations/0002_fsmchange.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def update_status(apps, schema_editor):
Process = apps.get_model("viewflow", "Process")
Process.objects.filter(status='STR').update(status='STARTED')
Process.objects.filter(status='FNS').update(status='DONE')
Task = apps.get_model("viewflow", "Task")
Task.objects.filter(status='ASN').update(status='ASSIGNED')
Task.objects.filter(status='STR').update(status='STARTED')
Task.objects.filter(status='FNS').update(status='DONE')
class Migration(migrations.Migration):
dependencies = [
('viewflow', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='task',
name='comments',
field=models.TextField(blank=True, null=True),
preserve_default=True,
),
migrations.RunPython(
update_status
)
]
|
Fix migrations for 0.8 -> 0.9 upgrade
|
Fix migrations for 0.8 -> 0.9 upgrade
|
Python
|
agpl-3.0
|
pombredanne/viewflow,viewflow/viewflow,viewflow/viewflow,ribeiro-ucl/viewflow,ribeiro-ucl/viewflow,codingjoe/viewflow,codingjoe/viewflow,codingjoe/viewflow,pombredanne/viewflow,viewflow/viewflow,ribeiro-ucl/viewflow
|
Fix migrations for 0.8 -> 0.9 upgrade
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def update_status(apps, schema_editor):
Process = apps.get_model("viewflow", "Process")
Process.objects.filter(status='STR').update(status='STARTED')
Process.objects.filter(status='FNS').update(status='DONE')
Task = apps.get_model("viewflow", "Task")
Task.objects.filter(status='ASN').update(status='ASSIGNED')
Task.objects.filter(status='STR').update(status='STARTED')
Task.objects.filter(status='FNS').update(status='DONE')
class Migration(migrations.Migration):
dependencies = [
('viewflow', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='task',
name='comments',
field=models.TextField(blank=True, null=True),
preserve_default=True,
),
migrations.RunPython(
update_status
)
]
|
<commit_before><commit_msg>Fix migrations for 0.8 -> 0.9 upgrade<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def update_status(apps, schema_editor):
Process = apps.get_model("viewflow", "Process")
Process.objects.filter(status='STR').update(status='STARTED')
Process.objects.filter(status='FNS').update(status='DONE')
Task = apps.get_model("viewflow", "Task")
Task.objects.filter(status='ASN').update(status='ASSIGNED')
Task.objects.filter(status='STR').update(status='STARTED')
Task.objects.filter(status='FNS').update(status='DONE')
class Migration(migrations.Migration):
dependencies = [
('viewflow', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='task',
name='comments',
field=models.TextField(blank=True, null=True),
preserve_default=True,
),
migrations.RunPython(
update_status
)
]
|
Fix migrations for 0.8 -> 0.9 upgrade# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def update_status(apps, schema_editor):
Process = apps.get_model("viewflow", "Process")
Process.objects.filter(status='STR').update(status='STARTED')
Process.objects.filter(status='FNS').update(status='DONE')
Task = apps.get_model("viewflow", "Task")
Task.objects.filter(status='ASN').update(status='ASSIGNED')
Task.objects.filter(status='STR').update(status='STARTED')
Task.objects.filter(status='FNS').update(status='DONE')
class Migration(migrations.Migration):
dependencies = [
('viewflow', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='task',
name='comments',
field=models.TextField(blank=True, null=True),
preserve_default=True,
),
migrations.RunPython(
update_status
)
]
|
<commit_before><commit_msg>Fix migrations for 0.8 -> 0.9 upgrade<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def update_status(apps, schema_editor):
Process = apps.get_model("viewflow", "Process")
Process.objects.filter(status='STR').update(status='STARTED')
Process.objects.filter(status='FNS').update(status='DONE')
Task = apps.get_model("viewflow", "Task")
Task.objects.filter(status='ASN').update(status='ASSIGNED')
Task.objects.filter(status='STR').update(status='STARTED')
Task.objects.filter(status='FNS').update(status='DONE')
class Migration(migrations.Migration):
dependencies = [
('viewflow', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='task',
name='comments',
field=models.TextField(blank=True, null=True),
preserve_default=True,
),
migrations.RunPython(
update_status
)
]
|
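The data migration above registers only a forward function, so it cannot be unapplied. Django's RunPython also accepts a reverse_code callable; a hedged sketch of what that reverse mapping could look like follows (assumed, not part of the commit).

# Hedged sketch: a reverse function mirroring update_status() above. Wiring it
# up would look like: migrations.RunPython(update_status, reverse_code=revert_status)
def revert_status(apps, schema_editor):
    Process = apps.get_model('viewflow', 'Process')
    Process.objects.filter(status='STARTED').update(status='STR')
    Process.objects.filter(status='DONE').update(status='FNS')
    Task = apps.get_model('viewflow', 'Task')
    Task.objects.filter(status='ASSIGNED').update(status='ASN')
    Task.objects.filter(status='STARTED').update(status='STR')
    Task.objects.filter(status='DONE').update(status='FNS')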
|
3237345bdc725fc631b208ea705cb41b722bcd1f
|
exercises/chapter_03/exercise_03_01.py
|
exercises/chapter_03/exercise_03_01.py
|
# 3-1 Names
friend_names = ["Albert Einstein", "Isac Newton", "Marie Curie", "Galileo Galilei"]
print(friend_names[0])
print(friend_names[1])
print(friend_names[2])
print(friend_names[3])
|
Add solution to exercise 3.1.
|
Add solution to exercise 3.1.
|
Python
|
mit
|
HenrikSamuelsson/python-crash-course
|
Add solution to exercise 3.1.
|
# 3-1 Names
friend_names = ["Albert Einstein", "Isac Newton", "Marie Curie", "Galileo Galilei"]
print(friend_names[0])
print(friend_names[1])
print(friend_names[2])
print(friend_names[3])
|
<commit_before><commit_msg>Add solution to exercise 3.1.<commit_after>
|
# 3-1 Names
friend_names = ["Albert Einstein", "Isac Newton", "Marie Curie", "Galileo Galilei"]
print(friend_names[0])
print(friend_names[1])
print(friend_names[2])
print(friend_names[3])
|
Add solution to exercise 3.1.# 3-1 Names
friend_names = ["Albert Einstein", "Isac Newton", "Marie Curie", "Galileo Galilei"]
print(friend_names[0])
print(friend_names[1])
print(friend_names[2])
print(friend_names[3])
|
<commit_before><commit_msg>Add solution to exercise 3.1.<commit_after># 3-1 Names
friend_names = ["Albert Einstein", "Isac Newton", "Marie Curie", "Galileo Galilei"]
print(friend_names[0])
print(friend_names[1])
print(friend_names[2])
print(friend_names[3])
|
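An equivalent loop form of the exercise above, shown as an alternative rather than the book's listed answer; the list is copied verbatim from the record.

# Same output as the four explicit index lookups above, via a loop.
friend_names = ["Albert Einstein", "Isac Newton", "Marie Curie", "Galileo Galilei"]
for name in friend_names:
    print(name)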
|
455806aa0a25f2c632c62f00de21b5d3768135cb
|
cherrypy/test/test_wsgi_unix_socket.py
|
cherrypy/test/test_wsgi_unix_socket.py
|
import os
import sys
import socket
import atexit
import cherrypy
from cherrypy.test import helper
from cherrypy._cpcompat import HTTPConnection
USOCKET_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'cp_test.sock'
)
class USocketHTTPConnection(HTTPConnection):
"""
HTTPConnection over a unix socket.
"""
def __init__(self, path):
HTTPConnection.__init__(self, 'localhost')
self.path = path
def __call__(self, *args, **kwargs):
"""
Catch-all method just to present itself as a constructor for the
HTTPConnection.
"""
return self
def connect(self):
"""
Override the connect method and assign a unix socket as a transport.
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.path)
self.sock = sock
atexit.register(lambda: os.remove(self.path))
def skip_on_windows(method):
"""
Decorator to skip the method call if the test is executing on Windows.
"""
def wrapper(self):
if sys.platform == "win32":
return self.skip("No UNIX Socket support in Windows.")
else:
return method(self)
wrapper.__doc__ = method.__doc__
wrapper.__name__ = method.__name__
return wrapper
class WSGI_UnixSocket_Test(helper.CPWebCase):
"""
Test basic behavior on a cherrypy wsgi server listening
on a unix socket.
It exercises the config option `server.socket_file`.
"""
HTTP_CONN = USocketHTTPConnection(USOCKET_PATH)
@staticmethod
def setup_server():
class Root(object):
@cherrypy.expose
def index(self):
return "Test OK"
@cherrypy.expose
def error(self):
raise Exception("Invalid page")
config = {
'server.socket_file': USOCKET_PATH
}
cherrypy.config.update(config)
cherrypy.tree.mount(Root())
def tearDown(self):
cherrypy.config.update({'server.socket_file': None})
@skip_on_windows
def test_simple_request(self):
self.getPage("/")
self.assertStatus("200 OK")
self.assertInBody("Test OK")
@skip_on_windows
def test_not_found(self):
self.getPage("/invalid_path")
self.assertStatus("404 Not Found")
@skip_on_windows
def test_internal_error(self):
self.getPage("/error")
self.assertStatus("500 Internal Server Error")
self.assertInBody("Invalid page")
|
Add a basic test case for the unix socket support in cherrypy.
|
Add a basic test case for the unix socket support in cherrypy.
|
Python
|
bsd-3-clause
|
Safihre/cherrypy,cherrypy/cherrypy,Safihre/cherrypy,cherrypy/cherrypy,cherrypy/cheroot
|
Add a basic test case for the unix socket support in cherrypy.
|
import os
import sys
import socket
import atexit
import cherrypy
from cherrypy.test import helper
from cherrypy._cpcompat import HTTPConnection
USOCKET_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'cp_test.sock'
)
class USocketHTTPConnection(HTTPConnection):
"""
HTTPConnection over a unix socket.
"""
def __init__(self, path):
HTTPConnection.__init__(self, 'localhost')
self.path = path
def __call__(self, *args, **kwargs):
"""
Catch-all method just to present itself as a constructor for the
HTTPConnection.
"""
return self
def connect(self):
"""
Override the connect method and assign a unix socket as a transport.
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.path)
self.sock = sock
atexit.register(lambda: os.remove(self.path))
def skip_on_windows(method):
"""
Decorator to skip the method call if the test is executing on Windows.
"""
def wrapper(self):
if sys.platform == "win32":
return self.skip("No UNIX Socket support in Windows.")
else:
return method(self)
wrapper.__doc__ = method.__doc__
wrapper.__name__ = method.__name__
return wrapper
class WSGI_UnixSocket_Test(helper.CPWebCase):
"""
Test basic behavior on a cherrypy wsgi server listening
on a unix socket.
It exercises the config option `server.socket_file`.
"""
HTTP_CONN = USocketHTTPConnection(USOCKET_PATH)
@staticmethod
def setup_server():
class Root(object):
@cherrypy.expose
def index(self):
return "Test OK"
@cherrypy.expose
def error(self):
raise Exception("Invalid page")
config = {
'server.socket_file': USOCKET_PATH
}
cherrypy.config.update(config)
cherrypy.tree.mount(Root())
def tearDown(self):
cherrypy.config.update({'server.socket_file': None})
@skip_on_windows
def test_simple_request(self):
self.getPage("/")
self.assertStatus("200 OK")
self.assertInBody("Test OK")
@skip_on_windows
def test_not_found(self):
self.getPage("/invalid_path")
self.assertStatus("404 Not Found")
@skip_on_windows
def test_internal_error(self):
self.getPage("/error")
self.assertStatus("500 Internal Server Error")
self.assertInBody("Invalid page")
|
<commit_before><commit_msg>Add a basic test case for the unix socket support in cherrypy.<commit_after>
|
import os
import sys
import socket
import atexit
import cherrypy
from cherrypy.test import helper
from cherrypy._cpcompat import HTTPConnection
USOCKET_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'cp_test.sock'
)
class USocketHTTPConnection(HTTPConnection):
"""
HTTPConnection over a unix socket.
"""
def __init__(self, path):
HTTPConnection.__init__(self, 'localhost')
self.path = path
def __call__(self, *args, **kwargs):
"""
Catch-all method just to present itself as a constructor for the
HTTPConnection.
"""
return self
def connect(self):
"""
Override the connect method and assign a unix socket as a transport.
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.path)
self.sock = sock
atexit.register(lambda: os.remove(self.path))
def skip_on_windows(method):
"""
Decorator to skip the method call if the test is executing on Windows.
"""
def wrapper(self):
if sys.platform == "win32":
return self.skip("No UNIX Socket support in Windows.")
else:
return method(self)
wrapper.__doc__ = method.__doc__
wrapper.__name__ = method.__name__
return wrapper
class WSGI_UnixSocket_Test(helper.CPWebCase):
"""
Test basic behavior on a cherrypy wsgi server listening
on a unix socket.
It exercises the config option `server.socket_file`.
"""
HTTP_CONN = USocketHTTPConnection(USOCKET_PATH)
@staticmethod
def setup_server():
class Root(object):
@cherrypy.expose
def index(self):
return "Test OK"
@cherrypy.expose
def error(self):
raise Exception("Invalid page")
config = {
'server.socket_file': USOCKET_PATH
}
cherrypy.config.update(config)
cherrypy.tree.mount(Root())
def tearDown(self):
cherrypy.config.update({'server.socket_file': None})
@skip_on_windows
def test_simple_request(self):
self.getPage("/")
self.assertStatus("200 OK")
self.assertInBody("Test OK")
@skip_on_windows
def test_not_found(self):
self.getPage("/invalid_path")
self.assertStatus("404 Not Found")
@skip_on_windows
def test_internal_error(self):
self.getPage("/error")
self.assertStatus("500 Internal Server Error")
self.assertInBody("Invalid page")
|
Add a basic test case for the unix socket support in cherrypy.import os
import sys
import socket
import atexit
import cherrypy
from cherrypy.test import helper
from cherrypy._cpcompat import HTTPConnection
USOCKET_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'cp_test.sock'
)
class USocketHTTPConnection(HTTPConnection):
"""
HTTPConnection over a unix socket.
"""
def __init__(self, path):
HTTPConnection.__init__(self, 'localhost')
self.path = path
def __call__(self, *args, **kwargs):
"""
Catch-all method just to present itself as a constructor for the
HTTPConnection.
"""
return self
def connect(self):
"""
Override the connect method and assign a unix socket as a transport.
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.path)
self.sock = sock
atexit.register(lambda: os.remove(self.path))
def skip_on_windows(method):
"""
Decorator to skip the method call if the test is executing on Windows.
"""
def wrapper(self):
if sys.platform == "win32":
return self.skip("No UNIX Socket support in Windows.")
else:
return method(self)
wrapper.__doc__ = method.__doc__
wrapper.__name__ = method.__name__
return wrapper
class WSGI_UnixSocket_Test(helper.CPWebCase):
"""
Test basic behavior on a cherrypy wsgi server listening
on a unix socket.
It exercises the config option `server.socket_file`.
"""
HTTP_CONN = USocketHTTPConnection(USOCKET_PATH)
@staticmethod
def setup_server():
class Root(object):
@cherrypy.expose
def index(self):
return "Test OK"
@cherrypy.expose
def error(self):
raise Exception("Invalid page")
config = {
'server.socket_file': USOCKET_PATH
}
cherrypy.config.update(config)
cherrypy.tree.mount(Root())
def tearDown(self):
cherrypy.config.update({'server.socket_file': None})
@skip_on_windows
def test_simple_request(self):
self.getPage("/")
self.assertStatus("200 OK")
self.assertInBody("Test OK")
@skip_on_windows
def test_not_found(self):
self.getPage("/invalid_path")
self.assertStatus("404 Not Found")
@skip_on_windows
def test_internal_error(self):
self.getPage("/error")
self.assertStatus("500 Internal Server Error")
self.assertInBody("Invalid page")
|
<commit_before><commit_msg>Add a basic test case for the unix socket support in cherrypy.<commit_after>import os
import sys
import socket
import atexit
import cherrypy
from cherrypy.test import helper
from cherrypy._cpcompat import HTTPConnection
USOCKET_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'cp_test.sock'
)
class USocketHTTPConnection(HTTPConnection):
"""
HTTPConnection over a unix socket.
"""
def __init__(self, path):
HTTPConnection.__init__(self, 'localhost')
self.path = path
def __call__(self, *args, **kwargs):
"""
Catch-all method just to present itself as a constructor for the
HTTPConnection.
"""
return self
def connect(self):
"""
Override the connect method and assign a unix socket as a transport.
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.path)
self.sock = sock
atexit.register(lambda: os.remove(self.path))
def skip_on_windows(method):
"""
Decorator to skip the method call if the test is executing on Windows.
"""
def wrapper(self):
if sys.platform == "win32":
return self.skip("No UNIX Socket support in Windows.")
else:
return method(self)
wrapper.__doc__ = method.__doc__
wrapper.__name__ = method.__name__
return wrapper
class WSGI_UnixSocket_Test(helper.CPWebCase):
"""
Test basic behavior on a cherrypy wsgi server listening
on a unix socket.
It exercises the config option `server.socket_file`.
"""
HTTP_CONN = USocketHTTPConnection(USOCKET_PATH)
@staticmethod
def setup_server():
class Root(object):
@cherrypy.expose
def index(self):
return "Test OK"
@cherrypy.expose
def error(self):
raise Exception("Invalid page")
config = {
'server.socket_file': USOCKET_PATH
}
cherrypy.config.update(config)
cherrypy.tree.mount(Root())
def tearDown(self):
cherrypy.config.update({'server.socket_file': None})
@skip_on_windows
def test_simple_request(self):
self.getPage("/")
self.assertStatus("200 OK")
self.assertInBody("Test OK")
@skip_on_windows
def test_not_found(self):
self.getPage("/invalid_path")
self.assertStatus("404 Not Found")
@skip_on_windows
def test_internal_error(self):
self.getPage("/error")
self.assertStatus("500 Internal Server Error")
self.assertInBody("Invalid page")
|
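Outside the test harness, the same AF_UNIX trick can be used to issue a single request by hand. A hedged sketch follows: the socket path is assumed, it targets Python 3's http.client rather than cherrypy._cpcompat, and it presumes a server is already listening on that socket.

# Hedged sketch only: one GET over an AF_UNIX socket, assuming a server is
# already listening on SOCKET_PATH. Not part of the original test module.
import socket
from http.client import HTTPConnection

SOCKET_PATH = '/tmp/cp_test.sock'  # assumed path

class UnixHTTPConnection(HTTPConnection):
    def __init__(self, path):
        super().__init__('localhost')
        self.path = path

    def connect(self):
        # Swap the TCP transport for a unix-domain socket.
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.connect(self.path)

conn = UnixHTTPConnection(SOCKET_PATH)
conn.request('GET', '/')
print(conn.getresponse().status)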
|
f48ebff1ba0bb8fd3c1335db5bec101032d0d31b
|
scripts/make-gaps-for-missing-layers.py
|
scripts/make-gaps-for-missing-layers.py
|
#!/usr/bin/python
# This script renames the directories in a CATMAID stack's image root
# to take into account missing layers corresponding to particular
# z-values. This can occur when exporting the data from TrakEM2.
import glob, os, re, sys, subprocess
layers_missing_z = [
4950.0,
9450.0,
17500.0,
17550.0,
17600.0
]
layers_missing = [ int(round(z/50)) for z in layers_missing_z ]
layers_missing.sort()
layers_to_insert = layers_missing[:]
# Find the layers we already have:
directories = filter(lambda x: re.match('\d+$', x), os.listdir('.'))
directories = [int(x, 10) for x in directories]
directories.sort()
directory_mapping = zip(directories, directories)
while layers_to_insert:
missing_layer = layers_to_insert.pop(0)
for i, t in enumerate(directory_mapping):
if t[1] >= missing_layer:
directory_mapping[i] = (t[0], t[1] + 1)
directory_mapping.reverse()
for t in directory_mapping:
if t[0] != t[1]:
print "Will rename", t[0], "to", t[1]
subprocess.check_call(["mv", str(t[0]), str(t[1])])
for l in layers_missing:
print "Will create directory for missing layer", l
os.mkdir(str(l))
|
Add a script to rename CATMAID image directories to take account of missing layers
|
Add a script to rename CATMAID image directories to take account of missing layers
|
Python
|
agpl-3.0
|
fzadow/CATMAID,htem/CATMAID,fzadow/CATMAID,htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,htem/CATMAID
|
Add a script to rename CATMAID image directories to take account of missing layers
|
#!/usr/bin/python
# This script renames the directories in a CATMAID stack's image root
# to take into account missing layers corresponding to particular
# z-values. This can occur when exporting the data from TrakEM2.
import glob, os, re, sys, subprocess
layers_missing_z = [
4950.0,
9450.0,
17500.0,
17550.0,
17600.0
]
layers_missing = [ int(round(z/50)) for z in layers_missing_z ]
layers_missing.sort()
layers_to_insert = layers_missing[:]
# Find the layers we already have:
directories = filter(lambda x: re.match('\d+$', x), os.listdir('.'))
directories = [int(x, 10) for x in directories]
directories.sort()
directory_mapping = zip(directories, directories)
while layers_to_insert:
missing_layer = layers_to_insert.pop(0)
for i, t in enumerate(directory_mapping):
if t[1] >= missing_layer:
directory_mapping[i] = (t[0], t[1] + 1)
directory_mapping.reverse()
for t in directory_mapping:
if t[0] != t[1]:
print "Will rename", t[0], "to", t[1]
subprocess.check_call(["mv", str(t[0]), str(t[1])])
for l in layers_missing:
print "Will create directory for missing layer", l
os.mkdir(str(l))
|
<commit_before><commit_msg>Add a script to rename CATMAID image directories to take account of missing layers<commit_after>
|
#!/usr/bin/python
# This script renames the directories in a CATMAID stack's image root
# to take into account missing layers corresponding to particular
# z-values. This can occur when exporting the data from TrakEM2.
import glob, os, re, sys, subprocess
layers_missing_z = [
4950.0,
9450.0,
17500.0,
17550.0,
17600.0
]
layers_missing = [ int(round(z/50)) for z in layers_missing_z ]
layers_missing.sort()
layers_to_insert = layers_missing[:]
# Find the layers we already have:
directories = filter(lambda x: re.match('\d+$', x), os.listdir('.'))
directories = [int(x, 10) for x in directories]
directories.sort()
directory_mapping = zip(directories, directories)
while layers_to_insert:
missing_layer = layers_to_insert.pop(0)
for i, t in enumerate(directory_mapping):
if t[1] >= missing_layer:
directory_mapping[i] = (t[0], t[1] + 1)
directory_mapping.reverse()
for t in directory_mapping:
if t[0] != t[1]:
print "Will rename", t[0], "to", t[1]
subprocess.check_call(["mv", str(t[0]), str(t[1])])
for l in layers_missing:
print "Will create directory for missing layer", l
os.mkdir(str(l))
|
Add a script to rename CATMAID image directories to take account of missing layers#!/usr/bin/python
# This script renames the directories in a CATMAID stack's image root
# to take into account missing layers corresponding to particular
# z-values. This can occur when exporting the data from TrakEM2.
import glob, os, re, sys, subprocess
layers_missing_z = [
4950.0,
9450.0,
17500.0,
17550.0,
17600.0
]
layers_missing = [ int(round(z/50)) for z in layers_missing_z ]
layers_missing.sort()
layers_to_insert = layers_missing[:]
# Find the layers we already have:
directories = filter(lambda x: re.match('\d+$', x), os.listdir('.'))
directories = [int(x, 10) for x in directories]
directories.sort()
directory_mapping = zip(directories, directories)
while layers_to_insert:
missing_layer = layers_to_insert.pop(0)
for i, t in enumerate(directory_mapping):
if t[1] >= missing_layer:
directory_mapping[i] = (t[0], t[1] + 1)
directory_mapping.reverse()
for t in directory_mapping:
if t[0] != t[1]:
print "Will rename", t[0], "to", t[1]
subprocess.check_call(["mv", str(t[0]), str(t[1])])
for l in layers_missing:
print "Will create directory for missing layer", l
os.mkdir(str(l))
|
<commit_before><commit_msg>Add a script to rename CATMAID image directories to take account of missing layers<commit_after>#!/usr/bin/python
# This script renames the directories in a CATMAID stack's image root
# to take into account missing layers corresponding to particular
# z-values. This can occur when exporting the data from TrakEM2.
import glob, os, re, sys, subprocess
layers_missing_z = [
4950.0,
9450.0,
17500.0,
17550.0,
17600.0
]
layers_missing = [ int(round(z/50)) for z in layers_missing_z ]
layers_missing.sort()
layers_to_insert = layers_missing[:]
# Find the layers we already have:
directories = filter(lambda x: re.match('\d+$', x), os.listdir('.'))
directories = [int(x, 10) for x in directories]
directories.sort()
directory_mapping = zip(directories, directories)
while layers_to_insert:
missing_layer = layers_to_insert.pop(0)
for i, t in enumerate(directory_mapping):
if t[1] >= missing_layer:
directory_mapping[i] = (t[0], t[1] + 1)
directory_mapping.reverse()
for t in directory_mapping:
if t[0] != t[1]:
print "Will rename", t[0], "to", t[1]
subprocess.check_call(["mv", str(t[0]), str(t[1])])
for l in layers_missing:
print "Will create directory for missing layer", l
os.mkdir(str(l))
|
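The renumbering logic above is easier to follow on a toy input. Below is a hedged dry-run that reproduces the mapping computation on made-up layer indices and only prints the planned renames; it touches nothing on disk and is not part of the original script.

# Hedged dry-run of the renumbering logic above on assumed toy data.
existing = [0, 1, 2, 3, 4, 5]   # layer directories present on disk (assumed)
missing = [2, 4]                # layer indices that were never exported (assumed)

mapping = list(zip(existing, existing))
for missing_layer in sorted(missing):
    for i, (src, dst) in enumerate(mapping):
        if dst >= missing_layer:
            mapping[i] = (src, dst + 1)

for src, dst in reversed(mapping):
    if src != dst:
        print('would rename', src, '->', dst)
for layer in missing:
    print('would create empty directory for layer', layer)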
|
69738f5b8e9604b75ad49a72e86f322a7300e7c9
|
tests/test_automaton.py
|
tests/test_automaton.py
|
#!/usr/bin/env python3
"""Functions for testing the Automaton abstract base class."""
import nose.tools as nose
from automata.base.automaton import Automaton
def test_abstract_methods_not_implemented():
"""Should raise NotImplementedError when calling abstract methods."""
with nose.assert_raises(NotImplementedError):
Automaton.__init__(Automaton)
with nose.assert_raises(NotImplementedError):
Automaton.validate_self(Automaton)
with nose.assert_raises(NotImplementedError):
Automaton._validate_input_yield(Automaton, None)
|
Add tests for abstract method NotImplementedError
|
Add tests for abstract method NotImplementedError
|
Python
|
mit
|
caleb531/automata
|
Add tests for abstract method NotImplementedError
|
#!/usr/bin/env python3
"""Functions for testing the Automaton abstract base class."""
import nose.tools as nose
from automata.base.automaton import Automaton
def test_abstract_methods_not_implemented():
"""Should raise NotImplementedError when calling abstract methods."""
with nose.assert_raises(NotImplementedError):
Automaton.__init__(Automaton)
with nose.assert_raises(NotImplementedError):
Automaton.validate_self(Automaton)
with nose.assert_raises(NotImplementedError):
Automaton._validate_input_yield(Automaton, None)
|
<commit_before><commit_msg>Add tests for abstract method NotImplementedError<commit_after>
|
#!/usr/bin/env python3
"""Functions for testing the Automaton abstract base class."""
import nose.tools as nose
from automata.base.automaton import Automaton
def test_abstract_methods_not_implemented():
"""Should raise NotImplementedError when calling abstract methods."""
with nose.assert_raises(NotImplementedError):
Automaton.__init__(Automaton)
with nose.assert_raises(NotImplementedError):
Automaton.validate_self(Automaton)
with nose.assert_raises(NotImplementedError):
Automaton._validate_input_yield(Automaton, None)
|
Add tests for abstract method NotImplementedError#!/usr/bin/env python3
"""Functions for testing the Automaton abstract base class."""
import nose.tools as nose
from automata.base.automaton import Automaton
def test_abstract_methods_not_implemented():
"""Should raise NotImplementedError when calling abstract methods."""
with nose.assert_raises(NotImplementedError):
Automaton.__init__(Automaton)
with nose.assert_raises(NotImplementedError):
Automaton.validate_self(Automaton)
with nose.assert_raises(NotImplementedError):
Automaton._validate_input_yield(Automaton, None)
|
<commit_before><commit_msg>Add tests for abstract method NotImplementedError<commit_after>#!/usr/bin/env python3
"""Functions for testing the Automaton abstract base class."""
import nose.tools as nose
from automata.base.automaton import Automaton
def test_abstract_methods_not_implemented():
"""Should raise NotImplementedError when calling abstract methods."""
with nose.assert_raises(NotImplementedError):
Automaton.__init__(Automaton)
with nose.assert_raises(NotImplementedError):
Automaton.validate_self(Automaton)
with nose.assert_raises(NotImplementedError):
Automaton._validate_input_yield(Automaton, None)
|
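A library-free sketch of the pattern those tests check — a base class whose unimplemented hooks raise NotImplementedError until a subclass overrides them. This mirrors the idea only; it is not the automata-lib Automaton class, and the method names are copied just for flavour.

# Hedged, self-contained illustration of the NotImplementedError pattern.
class Base(object):
    def validate_self(self):
        raise NotImplementedError

    def _validate_input_yield(self, input_str):
        raise NotImplementedError

class Concrete(Base):
    def validate_self(self):
        return True

    def _validate_input_yield(self, input_str):
        yield input_str

try:
    Base().validate_self()
except NotImplementedError:
    print('base hook correctly unimplemented')
print(Concrete().validate_self())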
|
07867356f110026a2249d49fe5e583c42fc2a048
|
tests/test_singleton.py
|
tests/test_singleton.py
|
import unittest
from nose.tools import *
import mock
from gargoyle.settings import manager
import gargoyle.models
class TestGargoyle(unittest.TestCase):
def test_gargoyle_global_is_a_switch_manager(self):
reload(gargoyle.singleton)
self.assertIsInstance(gargoyle.singleton.gargoyle,
gargoyle.models.Manager)
def test_consructs_manager_with_storage_engine_from_settings(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
reload(gargoyle.singleton)
expected = ((), {'storage': manager.storage_engine})
eq_(init.call_args, expected)
|
import unittest
from nose.tools import *
import mock
from gargoyle.settings import manager
import gargoyle.models
class TestGargoyle(unittest.TestCase):
other_engine = dict()
def test_gargoyle_global_is_a_switch_manager(self):
reload(gargoyle.singleton)
self.assertIsInstance(gargoyle.singleton.gargoyle,
gargoyle.models.Manager)
def test_consructs_manager_with_storage_engine_from_settings(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
reload(gargoyle.singleton)
expected = ((), {'storage': manager.storage_engine})
eq_(init.call_args, expected)
def test_can_change_storage_engine_before_importing(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
manager.storage_engine = self.other_engine
reload(gargoyle.singleton)
expected = ((), dict(storage=self.other_engine))
eq_(init.call_args, expected)
|
Add another test to make sure you can import settings and change them before importing the singleton
|
Add another test to make sure you can import settings and change them before importing the singleton
|
Python
|
apache-2.0
|
kalail/gutter,disqus/gutter,kalail/gutter,kalail/gutter,disqus/gutter
|
import unittest
from nose.tools import *
import mock
from gargoyle.settings import manager
import gargoyle.models
class TestGargoyle(unittest.TestCase):
def test_gargoyle_global_is_a_switch_manager(self):
reload(gargoyle.singleton)
self.assertIsInstance(gargoyle.singleton.gargoyle,
gargoyle.models.Manager)
def test_consructs_manager_with_storage_engine_from_settings(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
reload(gargoyle.singleton)
expected = ((), {'storage': manager.storage_engine})
eq_(init.call_args, expected)
Add another test to make sure you can import settings and change them before importing the singleton
|
import unittest
from nose.tools import *
import mock
from gargoyle.settings import manager
import gargoyle.models
class TestGargoyle(unittest.TestCase):
other_engine = dict()
def test_gargoyle_global_is_a_switch_manager(self):
reload(gargoyle.singleton)
self.assertIsInstance(gargoyle.singleton.gargoyle,
gargoyle.models.Manager)
def test_consructs_manager_with_storage_engine_from_settings(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
reload(gargoyle.singleton)
expected = ((), {'storage': manager.storage_engine})
eq_(init.call_args, expected)
def test_can_change_storage_engine_before_importing(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
manager.storage_engine = self.other_engine
reload(gargoyle.singleton)
expected = ((), dict(storage=self.other_engine))
eq_(init.call_args, expected)
|
<commit_before>import unittest
from nose.tools import *
import mock
from gargoyle.settings import manager
import gargoyle.models
class TestGargoyle(unittest.TestCase):
def test_gargoyle_global_is_a_switch_manager(self):
reload(gargoyle.singleton)
self.assertIsInstance(gargoyle.singleton.gargoyle,
gargoyle.models.Manager)
def test_consructs_manager_with_storage_engine_from_settings(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
reload(gargoyle.singleton)
expected = ((), {'storage': manager.storage_engine})
eq_(init.call_args, expected)
<commit_msg>Add another test to make sure you can import settings and change them before importing the singleton<commit_after>
|
import unittest
from nose.tools import *
import mock
from gargoyle.settings import manager
import gargoyle.models
class TestGargoyle(unittest.TestCase):
other_engine = dict()
def test_gargoyle_global_is_a_switch_manager(self):
reload(gargoyle.singleton)
self.assertIsInstance(gargoyle.singleton.gargoyle,
gargoyle.models.Manager)
def test_consructs_manager_with_storage_engine_from_settings(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
reload(gargoyle.singleton)
expected = ((), {'storage': manager.storage_engine})
eq_(init.call_args, expected)
def test_can_change_storage_engine_before_importing(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
manager.storage_engine = self.other_engine
reload(gargoyle.singleton)
expected = ((), dict(storage=self.other_engine))
eq_(init.call_args, expected)
|
import unittest
from nose.tools import *
import mock
from gargoyle.settings import manager
import gargoyle.models
class TestGargoyle(unittest.TestCase):
def test_gargoyle_global_is_a_switch_manager(self):
reload(gargoyle.singleton)
self.assertIsInstance(gargoyle.singleton.gargoyle,
gargoyle.models.Manager)
def test_consructs_manager_with_storage_engine_from_settings(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
reload(gargoyle.singleton)
expected = ((), {'storage': manager.storage_engine})
eq_(init.call_args, expected)
Add another test to make sure you can import settings and change them before importing the singletonimport unittest
from nose.tools import *
import mock
from gargoyle.settings import manager
import gargoyle.models
class TestGargoyle(unittest.TestCase):
other_engine = dict()
def test_gargoyle_global_is_a_switch_manager(self):
reload(gargoyle.singleton)
self.assertIsInstance(gargoyle.singleton.gargoyle,
gargoyle.models.Manager)
def test_consructs_manager_with_storage_engine_from_settings(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
reload(gargoyle.singleton)
expected = ((), {'storage': manager.storage_engine})
eq_(init.call_args, expected)
def test_can_change_storage_engine_before_importing(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
manager.storage_engine = self.other_engine
reload(gargoyle.singleton)
expected = ((), dict(storage=self.other_engine))
eq_(init.call_args, expected)
|
<commit_before>import unittest
from nose.tools import *
import mock
from gargoyle.settings import manager
import gargoyle.models
class TestGargoyle(unittest.TestCase):
def test_gargoyle_global_is_a_switch_manager(self):
reload(gargoyle.singleton)
self.assertIsInstance(gargoyle.singleton.gargoyle,
gargoyle.models.Manager)
def test_consructs_manager_with_storage_engine_from_settings(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
reload(gargoyle.singleton)
expected = ((), {'storage': manager.storage_engine})
eq_(init.call_args, expected)
<commit_msg>Add another test to make sure you can import settings and change them before importing the singleton<commit_after>import unittest
from nose.tools import *
import mock
from gargoyle.settings import manager
import gargoyle.models
class TestGargoyle(unittest.TestCase):
other_engine = dict()
def test_gargoyle_global_is_a_switch_manager(self):
reload(gargoyle.singleton)
self.assertIsInstance(gargoyle.singleton.gargoyle,
gargoyle.models.Manager)
def test_consructs_manager_with_storage_engine_from_settings(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
reload(gargoyle.singleton)
expected = ((), {'storage': manager.storage_engine})
eq_(init.call_args, expected)
def test_can_change_storage_engine_before_importing(self):
with mock.patch('gargoyle.models.Manager') as init:
init.return_value = None
manager.storage_engine = self.other_engine
reload(gargoyle.singleton)
expected = ((), dict(storage=self.other_engine))
eq_(init.call_args, expected)
|
ac8ec32258652deefb39b8d29e05e52ca28bf1b4
|
14B-088/HI/HI_correct_mask_model.py
|
14B-088/HI/HI_correct_mask_model.py
|
'''
Swap the spatial axes. Swap the spectral and stokes axes.
'''
import sys
from astropy.io import fits
hdu = fits.open(sys.argv[1], mode='update')
hdu[0].data = hdu[0].data.swapaxes(0, 1)
hdu[0].data = hdu[0].data[:, :, ::-1, ::-1]
hdu.flush()
execfile("~/Dropbox/code_development/ewky_scripts/header_swap_axis.py")
hdu[0].header = header_swapaxes(hdu[0].header, 2, 3)
hdu.flush()
hdu.close()
|
Correct regridded mask and model
|
Correct regridded mask and model
|
Python
|
mit
|
e-koch/VLA_Lband,e-koch/VLA_Lband
|
Correct regridded mask and model
|
'''
Swap the spatial axes. Swap the spectral and stokes axes.
'''
import sys
from astropy.io import fits
hdu = fits.open(sys.argv[1], mode='update')
hdu[0].data = hdu[0].data.swapaxes(0, 1)
hdu[0].data = hdu[0].data[:, :, ::-1, ::-1]
hdu.flush()
execfile("~/Dropbox/code_development/ewky_scripts/header_swap_axis.py")
hdu[0].header = header_swapaxes(hdu[0].header, 2, 3)
hdu.flush()
hdu.close()
|
<commit_before><commit_msg>Correct regridded mask and model<commit_after>
|
'''
Swap the spatial axes. Swap the spectral and stokes axes.
'''
import sys
from astropy.io import fits
hdu = fits.open(sys.argv[1], mode='update')
hdu[0].data = hdu[0].data.swapaxes(0, 1)
hdu[0].data = hdu[0].data[:, :, ::-1, ::-1]
hdu.flush()
execfile("~/Dropbox/code_development/ewky_scripts/header_swap_axis.py")
hdu[0].header = header_swapaxes(hdu[0].header, 2, 3)
hdu.flush()
hdu.close()
|
Correct regridded mask and model
'''
Swap the spatial axes. Swap the spectral and stokes axes.
'''
import sys
from astropy.io import fits
hdu = fits.open(sys.argv[1], mode='update')
hdu[0].data = hdu[0].data.swapaxes(0, 1)
hdu[0].data = hdu[0].data[:, :, ::-1, ::-1]
hdu.flush()
execfile("~/Dropbox/code_development/ewky_scripts/header_swap_axis.py")
hdu[0].header = header_swapaxes(hdu[0].header, 2, 3)
hdu.flush()
hdu.close()
|
<commit_before><commit_msg>Correct regridded mask and model<commit_after>
'''
Swap the spatial axes. Swap the spectral and stokes axes.
'''
import sys
from astropy.io import fits
hdu = fits.open(sys.argv[1], mode='update')
hdu[0].data = hdu[0].data.swapaxes(0, 1)
hdu[0].data = hdu[0].data[:, :, ::-1, ::-1]
hdu.flush()
execfile("~/Dropbox/code_development/ewky_scripts/header_swap_axis.py")
hdu[0].header = header_swapaxes(hdu[0].header, 2, 3)
hdu.flush()
hdu.close()
|
|
b0d13f4f6332e18390a1d8e0152e55b8fb2e780e
|
sdntest/examples/customtopo/triangle.py
|
sdntest/examples/customtopo/triangle.py
|
"""Custom topology example
s1---s2
| /
| /
| /
s3
Consist of three fixed core switches, and each core switches will connect to m hosts through n switches.
"""
from mininet.topo import Topo
from optparse import OptionParser
class MyTopo( Topo ):
"Simple topology example."
# def __init__( self ):
def build( self, m=1, n=1 ):
"Create custom topo."
# Initialize topology
#Topo.__init__( self )
switch_index = 1
host_index = 1
# core = ['space']
switch = ['space']
host = ['space']
# parser = OptionParser()
# parser.add_option("-m", action="store", type="int", dest="m")
# parser.add_option("-n", action="store", type="int", dest="n")
# (options, args) = parser,parse_args()
# print options.m
# print options.n
#m = 2
#n = 2
CORE_NUMBER = 3
for i in range(1, CORE_NUMBER+1):
switch.append(self.addSwitch( 's'+str(switch_index) ))
switch_index = switch_index + 1
for k in range(1, CORE_NUMBER+1):
if (k==CORE_NUMBER):
self.addLink( switch[k], switch[1] )
else:
self.addLink( switch[k], switch[k+1] )
for i in range(1,m+1):
for j in range(1,n+1):
switch.append(self.addSwitch( 's'+str(switch_index) ))
if (j==1):
self.addLink( switch[k],switch[switch_index] )
else:
self.addLink( switch[switch_index-1],switch[switch_index])
switch_index = switch_index + 1
host.append(self.addHost( 'h'+str(host_index)))
self.addLink( host[host_index], switch[switch_index-1])
host_index = host_index + 1
print "total_switches=%u"%(switch_index-1+3)
print "total_hosts=%u"%(host_index-1)
print "total_nodes=%u"%(switch_index-1+3+host_index-1)
topos = { 'mytopo': ( lambda m,n:MyTopo(m, n) ) }
|
Add a custom topology example
|
Add a custom topology example
|
Python
|
mit
|
snlab-freedom/sdntest,snlab-freedom/sdntest
|
Add a custom topology example
|
"""Custom topology example
s1---s2
| /
| /
| /
s3
Consist of three fixed core switches, and each core switches will connect to m hosts through n switches.
"""
from mininet.topo import Topo
from optparse import OptionParser
class MyTopo( Topo ):
"Simple topology example."
# def __init__( self ):
def build( self, m=1, n=1 ):
"Create custom topo."
# Initialize topology
#Topo.__init__( self )
switch_index = 1
host_index = 1
# core = ['space']
switch = ['space']
host = ['space']
# parser = OptionParser()
# parser.add_option("-m", action="store", type="int", dest="m")
# parser.add_option("-n", action="store", type="int", dest="n")
# (options, args) = parser,parse_args()
# print options.m
# print options.n
#m = 2
#n = 2
CORE_NUMBER = 3
for i in range(1, CORE_NUMBER+1):
switch.append(self.addSwitch( 's'+str(switch_index) ))
switch_index = switch_index + 1
for k in range(1, CORE_NUMBER+1):
if (k==CORE_NUMBER):
self.addLink( switch[k], switch[1] )
else:
self.addLink( switch[k], switch[k+1] )
for i in range(1,m+1):
for j in range(1,n+1):
switch.append(self.addSwitch( 's'+str(switch_index) ))
if (j==1):
self.addLink( switch[k],switch[switch_index] )
else:
self.addLink( switch[switch_index-1],switch[switch_index])
switch_index = switch_index + 1
host.append(self.addHost( 'h'+str(host_index)))
self.addLink( host[host_index], switch[switch_index-1])
host_index = host_index + 1
print "total_switches=%u"%(switch_index-1+3)
print "total_hosts=%u"%(host_index-1)
print "total_nodes=%u"%(switch_index-1+3+host_index-1)
topos = { 'mytopo': ( lambda m,n:MyTopo(m, n) ) }
|
<commit_before><commit_msg>Add a custom topology example<commit_after>
|
"""Custom topology example
s1---s2
| /
| /
| /
s3
Consist of three fixed core switches, and each core switches will connect to m hosts through n switches.
"""
from mininet.topo import Topo
from optparse import OptionParser
class MyTopo( Topo ):
"Simple topology example."
# def __init__( self ):
def build( self, m=1, n=1 ):
"Create custom topo."
# Initialize topology
#Topo.__init__( self )
switch_index = 1
host_index = 1
# core = ['space']
switch = ['space']
host = ['space']
# parser = OptionParser()
# parser.add_option("-m", action="store", type="int", dest="m")
# parser.add_option("-n", action="store", type="int", dest="n")
# (options, args) = parser,parse_args()
# print options.m
# print options.n
#m = 2
#n = 2
CORE_NUMBER = 3
for i in range(1, CORE_NUMBER+1):
switch.append(self.addSwitch( 's'+str(switch_index) ))
switch_index = switch_index + 1
for k in range(1, CORE_NUMBER+1):
if (k==CORE_NUMBER):
self.addLink( switch[k], switch[1] )
else:
self.addLink( switch[k], switch[k+1] )
for i in range(1,m+1):
for j in range(1,n+1):
switch.append(self.addSwitch( 's'+str(switch_index) ))
if (j==1):
self.addLink( switch[k],switch[switch_index] )
else:
self.addLink( switch[switch_index-1],switch[switch_index])
switch_index = switch_index + 1
host.append(self.addHost( 'h'+str(host_index)))
self.addLink( host[host_index], switch[switch_index-1])
host_index = host_index + 1
print "total_switches=%u"%(switch_index-1+3)
print "total_hosts=%u"%(host_index-1)
print "total_nodes=%u"%(switch_index-1+3+host_index-1)
topos = { 'mytopo': ( lambda m,n:MyTopo(m, n) ) }
|
Add a custom topology example"""Custom topology example
s1---s2
| /
| /
| /
s3
Consist of three fixed core switches, and each core switches will connect to m hosts through n switches.
"""
from mininet.topo import Topo
from optparse import OptionParser
class MyTopo( Topo ):
"Simple topology example."
# def __init__( self ):
def build( self, m=1, n=1 ):
"Create custom topo."
# Initialize topology
#Topo.__init__( self )
switch_index = 1
host_index = 1
# core = ['space']
switch = ['space']
host = ['space']
# parser = OptionParser()
# parser.add_option("-m", action="store", type="int", dest="m")
# parser.add_option("-n", action="store", type="int", dest="n")
# (options, args) = parser,parse_args()
# print options.m
# print options.n
#m = 2
#n = 2
CORE_NUMBER = 3
for i in range(1, CORE_NUMBER+1):
switch.append(self.addSwitch( 's'+str(switch_index) ))
switch_index = switch_index + 1
for k in range(1, CORE_NUMBER+1):
if (k==CORE_NUMBER):
self.addLink( switch[k], switch[1] )
else:
self.addLink( switch[k], switch[k+1] )
for i in range(1,m+1):
for j in range(1,n+1):
switch.append(self.addSwitch( 's'+str(switch_index) ))
if (j==1):
self.addLink( switch[k],switch[switch_index] )
else:
self.addLink( switch[switch_index-1],switch[switch_index])
switch_index = switch_index + 1
host.append(self.addHost( 'h'+str(host_index)))
self.addLink( host[host_index], switch[switch_index-1])
host_index = host_index + 1
print "total_switches=%u"%(switch_index-1+3)
print "total_hosts=%u"%(host_index-1)
print "total_nodes=%u"%(switch_index-1+3+host_index-1)
topos = { 'mytopo': ( lambda m,n:MyTopo(m, n) ) }
|
<commit_before><commit_msg>Add a custom topology example<commit_after>"""Custom topology example
s1---s2
| /
| /
| /
s3
Consist of three fixed core switches, and each core switches will connect to m hosts through n switches.
"""
from mininet.topo import Topo
from optparse import OptionParser
class MyTopo( Topo ):
"Simple topology example."
# def __init__( self ):
def build( self, m=1, n=1 ):
"Create custom topo."
# Initialize topology
#Topo.__init__( self )
switch_index = 1
host_index = 1
# core = ['space']
switch = ['space']
host = ['space']
# parser = OptionParser()
# parser.add_option("-m", action="store", type="int", dest="m")
# parser.add_option("-n", action="store", type="int", dest="n")
# (options, args) = parser,parse_args()
# print options.m
# print options.n
#m = 2
#n = 2
CORE_NUMBER = 3
for i in range(1, CORE_NUMBER+1):
switch.append(self.addSwitch( 's'+str(switch_index) ))
switch_index = switch_index + 1
for k in range(1, CORE_NUMBER+1):
if (k==CORE_NUMBER):
self.addLink( switch[k], switch[1] )
else:
self.addLink( switch[k], switch[k+1] )
for i in range(1,m+1):
for j in range(1,n+1):
switch.append(self.addSwitch( 's'+str(switch_index) ))
if (j==1):
self.addLink( switch[k],switch[switch_index] )
else:
self.addLink( switch[switch_index-1],switch[switch_index])
switch_index = switch_index + 1
host.append(self.addHost( 'h'+str(host_index)))
self.addLink( host[host_index], switch[switch_index-1])
host_index = host_index + 1
print "total_switches=%u"%(switch_index-1+3)
print "total_hosts=%u"%(host_index-1)
print "total_nodes=%u"%(switch_index-1+3+host_index-1)
topos = { 'mytopo': ( lambda m,n:MyTopo(m, n) ) }
|
|
bdbd262be925e318a6096606884af793aca158f7
|
Lib/fontbakery/profiles/iso15008.py
|
Lib/fontbakery/profiles/iso15008.py
|
"""
Checks for suitability for in-car displays (ISO 15008).
"""
from fontbakery.callable import check
from fontbakery.section import Section
from fontbakery.status import PASS, FAIL, WARN
from fontbakery.fonts_profile import profile_factory
from fontbakery.message import Message
from fontTools.pens.boundsPen import BoundsPen
profile = profile_factory(default_section=Section("Suitability for In-Car Display"))
DISCLAIMER = """
(Note that PASSing this check does not guarantee compliance with ISO 15008.)
"""
CHECKS = ["com.google.fonts/check/iso15008_proportions"]
@check(
id="com.google.fonts/check/iso15008_proportions",
rationale="""
According to ISO 15008, fonts used for in-car displays should not be too narrow or too wide.
To ensure legibility of this font on in-car information systems, it is recommended that the ratio of H width to H height is between 0.65 and 0.80."""
+ DISCLAIMER,
)
def com_google_fonts_check_iso15008_proportions(ttFont):
"""Check if 0.65 => (H width / H height) => 0.80"""
glyphset = ttFont.getGlyphSet()
if "H" not in glyphset:
yield FAIL, Message(
"glyph-not-present",
"There was no 'H' glyph in the font, so the proportions could not be tested",
)
h_glyph = glyphset["H"]
pen = BoundsPen(glyphset)
h_glyph._glyph.draw(pen, ttFont.get("glyf"))
(xMin, yMin, xMax, yMax) = pen.bounds
proportion = (xMax - xMin) / (yMax - yMin)
if 0.65 <= proportion <= 0.80:
yield PASS, "the letter H is not too narrow or too wide"
else:
yield FAIL, Message(
"invalid-proportion",
f"The proportion of H width to H height ({proportion})"
f"does not conform to the expected range of 0.65-0.80",
)
profile.auto_register(globals())
profile.test_expected_checks(CHECKS, exclusive=True)
|
Add new ISO15008 profile and proportion check
|
Add new ISO15008 profile and proportion check
|
Python
|
apache-2.0
|
moyogo/fontbakery,googlefonts/fontbakery,moyogo/fontbakery,googlefonts/fontbakery,moyogo/fontbakery,googlefonts/fontbakery
|
Add new ISO15008 profile and proportion check
|
"""
Checks for suitability for in-car displays (ISO 15008).
"""
from fontbakery.callable import check
from fontbakery.section import Section
from fontbakery.status import PASS, FAIL, WARN
from fontbakery.fonts_profile import profile_factory
from fontbakery.message import Message
from fontTools.pens.boundsPen import BoundsPen
profile = profile_factory(default_section=Section("Suitability for In-Car Display"))
DISCLAIMER = """
(Note that PASSing this check does not guarantee compliance with ISO 15008.)
"""
CHECKS = ["com.google.fonts/check/iso15008_proportions"]
@check(
id="com.google.fonts/check/iso15008_proportions",
rationale="""
According to ISO 15008, fonts used for in-car displays should not be too narrow or too wide.
To ensure legibility of this font on in-car information systems, it is recommended that the ratio of H width to H height is between 0.65 and 0.80."""
+ DISCLAIMER,
)
def com_google_fonts_check_iso15008_proportions(ttFont):
"""Check if 0.65 => (H width / H height) => 0.80"""
glyphset = ttFont.getGlyphSet()
if "H" not in glyphset:
yield FAIL, Message(
"glyph-not-present",
"There was no 'H' glyph in the font, so the proportions could not be tested",
)
h_glyph = glyphset["H"]
pen = BoundsPen(glyphset)
h_glyph._glyph.draw(pen, ttFont.get("glyf"))
(xMin, yMin, xMax, yMax) = pen.bounds
proportion = (xMax - xMin) / (yMax - yMin)
if 0.65 <= proportion <= 0.80:
yield PASS, "the letter H is not too narrow or too wide"
else:
yield FAIL, Message(
"invalid-proportion",
f"The proportion of H width to H height ({proportion})"
f"does not conform to the expected range of 0.65-0.80",
)
profile.auto_register(globals())
profile.test_expected_checks(CHECKS, exclusive=True)
|
<commit_before><commit_msg>Add new ISO15008 profile and proportion check<commit_after>
|
"""
Checks for suitability for in-car displays (ISO 15008).
"""
from fontbakery.callable import check
from fontbakery.section import Section
from fontbakery.status import PASS, FAIL, WARN
from fontbakery.fonts_profile import profile_factory
from fontbakery.message import Message
from fontTools.pens.boundsPen import BoundsPen
profile = profile_factory(default_section=Section("Suitability for In-Car Display"))
DISCLAIMER = """
(Note that PASSing this check does not guarantee compliance with ISO 15008.)
"""
CHECKS = ["com.google.fonts/check/iso15008_proportions"]
@check(
id="com.google.fonts/check/iso15008_proportions",
rationale="""
According to ISO 15008, fonts used for in-car displays should not be too narrow or too wide.
To ensure legibility of this font on in-car information systems, it is recommended that the ratio of H width to H height is between 0.65 and 0.80."""
+ DISCLAIMER,
)
def com_google_fonts_check_iso15008_proportions(ttFont):
"""Check if 0.65 => (H width / H height) => 0.80"""
glyphset = ttFont.getGlyphSet()
if "H" not in glyphset:
yield FAIL, Message(
"glyph-not-present",
"There was no 'H' glyph in the font, so the proportions could not be tested",
)
h_glyph = glyphset["H"]
pen = BoundsPen(glyphset)
h_glyph._glyph.draw(pen, ttFont.get("glyf"))
(xMin, yMin, xMax, yMax) = pen.bounds
proportion = (xMax - xMin) / (yMax - yMin)
if 0.65 <= proportion <= 0.80:
yield PASS, "the letter H is not too narrow or too wide"
else:
yield FAIL, Message(
"invalid-proportion",
f"The proportion of H width to H height ({proportion})"
f"does not conform to the expected range of 0.65-0.80",
)
profile.auto_register(globals())
profile.test_expected_checks(CHECKS, exclusive=True)
|
Add new ISO15008 profile and proportion check"""
Checks for suitability for in-car displays (ISO 15008).
"""
from fontbakery.callable import check
from fontbakery.section import Section
from fontbakery.status import PASS, FAIL, WARN
from fontbakery.fonts_profile import profile_factory
from fontbakery.message import Message
from fontTools.pens.boundsPen import BoundsPen
profile = profile_factory(default_section=Section("Suitability for In-Car Display"))
DISCLAIMER = """
(Note that PASSing this check does not guarantee compliance with ISO 15008.)
"""
CHECKS = ["com.google.fonts/check/iso15008_proportions"]
@check(
id="com.google.fonts/check/iso15008_proportions",
rationale="""
According to ISO 15008, fonts used for in-car displays should not be too narrow or too wide.
To ensure legibility of this font on in-car information systems, it is recommended that the ratio of H width to H height is between 0.65 and 0.80."""
+ DISCLAIMER,
)
def com_google_fonts_check_iso15008_proportions(ttFont):
"""Check if 0.65 => (H width / H height) => 0.80"""
glyphset = ttFont.getGlyphSet()
if "H" not in glyphset:
yield FAIL, Message(
"glyph-not-present",
"There was no 'H' glyph in the font, so the proportions could not be tested",
)
h_glyph = glyphset["H"]
pen = BoundsPen(glyphset)
h_glyph._glyph.draw(pen, ttFont.get("glyf"))
(xMin, yMin, xMax, yMax) = pen.bounds
proportion = (xMax - xMin) / (yMax - yMin)
if 0.65 <= proportion <= 0.80:
yield PASS, "the letter H is not too narrow or too wide"
else:
yield FAIL, Message(
"invalid-proportion",
f"The proportion of H width to H height ({proportion})"
f"does not conform to the expected range of 0.65-0.80",
)
profile.auto_register(globals())
profile.test_expected_checks(CHECKS, exclusive=True)
|
<commit_before><commit_msg>Add new ISO15008 profile and proportion check<commit_after>"""
Checks for suitability for in-car displays (ISO 15008).
"""
from fontbakery.callable import check
from fontbakery.section import Section
from fontbakery.status import PASS, FAIL, WARN
from fontbakery.fonts_profile import profile_factory
from fontbakery.message import Message
from fontTools.pens.boundsPen import BoundsPen
profile = profile_factory(default_section=Section("Suitability for In-Car Display"))
DISCLAIMER = """
(Note that PASSing this check does not guarantee compliance with ISO 15008.)
"""
CHECKS = ["com.google.fonts/check/iso15008_proportions"]
@check(
id="com.google.fonts/check/iso15008_proportions",
rationale="""
According to ISO 15008, fonts used for in-car displays should not be too narrow or too wide.
To ensure legibility of this font on in-car information systems, it is recommended that the ratio of H width to H height is between 0.65 and 0.80."""
+ DISCLAIMER,
)
def com_google_fonts_check_iso15008_proportions(ttFont):
"""Check if 0.65 => (H width / H height) => 0.80"""
glyphset = ttFont.getGlyphSet()
if "H" not in glyphset:
yield FAIL, Message(
"glyph-not-present",
"There was no 'H' glyph in the font, so the proportions could not be tested",
)
h_glyph = glyphset["H"]
pen = BoundsPen(glyphset)
h_glyph._glyph.draw(pen, ttFont.get("glyf"))
(xMin, yMin, xMax, yMax) = pen.bounds
proportion = (xMax - xMin) / (yMax - yMin)
if 0.65 <= proportion <= 0.80:
yield PASS, "the letter H is not too narrow or too wide"
else:
yield FAIL, Message(
"invalid-proportion",
f"The proportion of H width to H height ({proportion})"
f"does not conform to the expected range of 0.65-0.80",
)
profile.auto_register(globals())
profile.test_expected_checks(CHECKS, exclusive=True)
|
|
655d7777342b8446d94d0f81dda293740c574d3d
|
strategy/artifacting.py
|
strategy/artifacting.py
|
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import linear
class StrategyModule(linear.StrategyModule):
def _queue_task(self, host, task, task_vars, play_context):
"""Wipe the notification system and return for config tasks."""
task.notify = None
skip_tags = os.environ.get('OS_ANSIBLE_SKIP_TAGS', 'config')
skip_tags = skip_tags.split(',')
if any([True for i in skip_tags if i in task.tags]):
return
else:
return super(StrategyModule, self)._queue_task(
host,
task,
task_vars,
play_context
)
|
Add a new strategy filtering tasks
|
Add a new strategy filtering tasks
Ideally Ansible should allow (from a playbook) to run only part
of a role thanks to its tags (like the --skip-tags or --tags from
the cli). But it's not possible now.
This code is a PoC code for introducing a kind of filtering,
which basically skips tasks containing the word "config".
If you are building lxc images ("artifacts") of a role, you
probably don't want to run all the tasks in the role, and don't
want to run the handler tasks (because they start software for
example). This also addresses it.
To use this work, just include strategy: artifacting in your
play.
Change-Id: I006bc640c6563c959ceb835ddf5bef8d25dd7517
|
Python
|
apache-2.0
|
os-cloud/openstack-ansible-plugins,openstack/openstack-ansible-plugins,os-cloud/openstack-ansible-plugins,openstack/openstack-ansible-plugins
|
Add a new strategy filtering tasks
Ideally Ansible should allow (from a playbook) to run only part
of a role thanks to its tags (like the --skip-tags or --tags from
the cli). But it's not possible now.
This code is a PoC code for introducing a kind of filtering,
which basically skips tasks containing the word "config".
If you are building lxc images ("artifacts") of a role, you
probably don't want to run all the tasks in the role, and don't
want to run the handler tasks (because they start software for
example). This also addresses it.
To use this work, just include strategy: artifacting in your
play.
Change-Id: I006bc640c6563c959ceb835ddf5bef8d25dd7517
|
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import linear
class StrategyModule(linear.StrategyModule):
def _queue_task(self, host, task, task_vars, play_context):
"""Wipe the notification system and return for config tasks."""
task.notify = None
skip_tags = os.environ.get('OS_ANSIBLE_SKIP_TAGS', 'config')
skip_tags = skip_tags.split(',')
if any([True for i in skip_tags if i in task.tags]):
return
else:
return super(StrategyModule, self)._queue_task(
host,
task,
task_vars,
play_context
)
|
<commit_before><commit_msg>Add a new strategy filtering tasks
Ideally Ansible should allow (from a playbook) to run only part
of a role thanks to its tags (like the --skip-tags or --tags from
the cli). But it's not possible now.
This code is a PoC code for introducing a kind of filtering,
which basically skips tasks containing the word "config".
If you are building lxc images ("artifacts") of a role, you
probably don't want to run all the tasks in the role, and don't
want to run the handler tasks (because they start software for
example). This also addresses it.
To use this work, just include strategy: artifacting in your
play.
Change-Id: I006bc640c6563c959ceb835ddf5bef8d25dd7517<commit_after>
|
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import linear
class StrategyModule(linear.StrategyModule):
def _queue_task(self, host, task, task_vars, play_context):
"""Wipe the notification system and return for config tasks."""
task.notify = None
skip_tags = os.environ.get('OS_ANSIBLE_SKIP_TAGS', 'config')
skip_tags = skip_tags.split(',')
if any([True for i in skip_tags if i in task.tags]):
return
else:
return super(StrategyModule, self)._queue_task(
host,
task,
task_vars,
play_context
)
|
Add a new strategy filtering tasks
Ideally Ansible should allow (from a playbook) to run only part
of a role thanks to its tags (like the --skip-tags or --tags from
the cli). But it's not possible now.
This code is a PoC code for introducing a kind of filtering,
which basically skips tasks containing the word "config".
If you are building lxc images ("artifacts") of a role, you
probably don't want to run all the tasks in the role, and don't
want to run the handler tasks (because they start software for
example). This also addresses it.
To use this work, just include strategy: artifacting in your
play.
Change-Id: I006bc640c6563c959ceb835ddf5bef8d25dd7517# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import linear
class StrategyModule(linear.StrategyModule):
def _queue_task(self, host, task, task_vars, play_context):
"""Wipe the notification system and return for config tasks."""
task.notify = None
skip_tags = os.environ.get('OS_ANSIBLE_SKIP_TAGS', 'config')
skip_tags = skip_tags.split(',')
if any([True for i in skip_tags if i in task.tags]):
return
else:
return super(StrategyModule, self)._queue_task(
host,
task,
task_vars,
play_context
)
|
<commit_before><commit_msg>Add a new strategy filtering tasks
Ideally Ansible should allow (from a playbook) to run only part
of a role thanks to its tags (like the --skip-tags or --tags from
the cli). But it's not possible now.
This code is a PoC code for introducing a kind of filtering,
which basically skips tasks containing the word "config".
If you are building lxc images ("artifacts") of a role, you
probably don't want to run all the tasks in the role, and don't
want to run the handler tasks (because they start software for
example). This also addresses it.
To use this work, just include strategy: artifacting in your
play.
Change-Id: I006bc640c6563c959ceb835ddf5bef8d25dd7517<commit_after># Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import linear
class StrategyModule(linear.StrategyModule):
def _queue_task(self, host, task, task_vars, play_context):
"""Wipe the notification system and return for config tasks."""
task.notify = None
skip_tags = os.environ.get('OS_ANSIBLE_SKIP_TAGS', 'config')
skip_tags = skip_tags.split(',')
if any([True for i in skip_tags if i in task.tags]):
return
else:
return super(StrategyModule, self)._queue_task(
host,
task,
task_vars,
play_context
)
|
|
2da56c5bd7b5f33eb8e7769cb76c29a64058c96e
|
flexget/plugins/est_released_series.py
|
flexget/plugins/est_released_series.py
|
from __future__ import unicode_literals, division, absolute_import
import logging
from sqlalchemy import desc, func
from flexget.manager import Session
from flexget.plugin import register_plugin, priority
from flexget.plugins.filter.series import SeriesDatabase, Series, Episode
from flexget.utils.tools import multiply_timedelta
log = logging.getLogger('est_series')
class EstimatesReleasedSeries(SeriesDatabase):
@priority(0) # Run only if better online lookups fail
def estimate(self, entry):
if all(field in entry for field in ['series_name', 'series_season', 'series_episode']):
session = Session()
series = session.query(Series).filter(Series.name == entry['series_name']).first()
if not series:
return
episodes = (session.query(Episode).join(Series).
filter(Episode.season != None).
filter(Series.id == series.id).
filter(Episode.season == func.max(Episode.season).select()).
order_by(desc(Episode.number)).limit(2).all())
if len(episodes) < 2:
return
last_diff = episodes[0].first_seen - episodes[1].first_seen
return episodes[0].first_seen + multiply_timedelta(last_diff, 0.9)
# TODO: Some fancier logic? Season break estimates?
register_plugin(EstimatesReleasedSeries, 'est_released_series', groups=['estimate_release'])
|
from __future__ import unicode_literals, division, absolute_import
import logging
from sqlalchemy import desc, func
from flexget.manager import Session
from flexget.plugin import register_plugin, priority, DependencyError
from flexget.utils.tools import multiply_timedelta
try:
from flexget.plugins.filter.series import Series, Episode
except ImportError:
raise DependencyError(issued_by='est_released_series', missing='series plugin', silent=True)
log = logging.getLogger('est_series')
class EstimatesReleasedSeries(object):
@priority(0) # Run only if better online lookups fail
def estimate(self, entry):
if all(field in entry for field in ['series_name', 'series_season', 'series_episode']):
session = Session()
series = session.query(Series).filter(Series.name == entry['series_name']).first()
if not series:
return
episodes = (session.query(Episode).join(Series).
filter(Episode.season != None).
filter(Series.id == series.id).
filter(Episode.season == func.max(Episode.season).select()).
order_by(desc(Episode.number)).limit(2).all())
if len(episodes) < 2:
return
last_diff = episodes[0].first_seen - episodes[1].first_seen
return episodes[0].first_seen + multiply_timedelta(last_diff, 0.9)
# TODO: Some fancier logic? Season break estimates?
register_plugin(EstimatesReleasedSeries, 'est_released_series', groups=['estimate_release'])
|
Raise DependencyError when series plugin is missing
|
Raise DependencyError when series plugin is missing
|
Python
|
mit
|
vfrc2/Flexget,dsemi/Flexget,Danfocus/Flexget,thalamus/Flexget,JorisDeRieck/Flexget,crawln45/Flexget,antivirtel/Flexget,ibrahimkarahan/Flexget,poulpito/Flexget,voriux/Flexget,drwyrm/Flexget,Pretagonist/Flexget,qk4l/Flexget,patsissons/Flexget,JorisDeRieck/Flexget,malkavi/Flexget,X-dark/Flexget,JorisDeRieck/Flexget,tsnoam/Flexget,spencerjanssen/Flexget,tsnoam/Flexget,tvcsantos/Flexget,LynxyssCZ/Flexget,thalamus/Flexget,patsissons/Flexget,qvazzler/Flexget,Flexget/Flexget,OmgOhnoes/Flexget,X-dark/Flexget,qk4l/Flexget,sean797/Flexget,xfouloux/Flexget,cvium/Flexget,ianstalk/Flexget,asm0dey/Flexget,JorisDeRieck/Flexget,malkavi/Flexget,oxc/Flexget,qvazzler/Flexget,ibrahimkarahan/Flexget,Danfocus/Flexget,patsissons/Flexget,crawln45/Flexget,Pretagonist/Flexget,lildadou/Flexget,ibrahimkarahan/Flexget,tarzasai/Flexget,ratoaq2/Flexget,xfouloux/Flexget,vfrc2/Flexget,Danfocus/Flexget,ZefQ/Flexget,malkavi/Flexget,v17al/Flexget,OmgOhnoes/Flexget,tobinjt/Flexget,LynxyssCZ/Flexget,sean797/Flexget,voriux/Flexget,qk4l/Flexget,tvcsantos/Flexget,Pretagonist/Flexget,jacobmetrick/Flexget,cvium/Flexget,ratoaq2/Flexget,ZefQ/Flexget,sean797/Flexget,asm0dey/Flexget,dsemi/Flexget,X-dark/Flexget,jawilson/Flexget,tobinjt/Flexget,offbyone/Flexget,thalamus/Flexget,crawln45/Flexget,asm0dey/Flexget,gazpachoking/Flexget,Danfocus/Flexget,ratoaq2/Flexget,LynxyssCZ/Flexget,OmgOhnoes/Flexget,grrr2/Flexget,ianstalk/Flexget,offbyone/Flexget,grrr2/Flexget,jawilson/Flexget,spencerjanssen/Flexget,camon/Flexget,vfrc2/Flexget,ZefQ/Flexget,drwyrm/Flexget,jacobmetrick/Flexget,spencerjanssen/Flexget,cvium/Flexget,xfouloux/Flexget,jawilson/Flexget,tobinjt/Flexget,antivirtel/Flexget,tarzasai/Flexget,tobinjt/Flexget,dsemi/Flexget,jacobmetrick/Flexget,lildadou/Flexget,crawln45/Flexget,v17al/Flexget,drwyrm/Flexget,grrr2/Flexget,v17al/Flexget,LynxyssCZ/Flexget,tsnoam/Flexget,oxc/Flexget,Flexget/Flexget,tarzasai/Flexget,oxc/Flexget,malkavi/Flexget,poulpito/Flexget,ianstalk/Flexget,qvazzler/Flexget,Flexget/Flexget,offbyone/Flexget,jawilson/Flexget,poulpito/Flexget,antivirtel/Flexget,lildadou/Flexget,gazpachoking/Flexget,camon/Flexget,Flexget/Flexget
|
from __future__ import unicode_literals, division, absolute_import
import logging
from sqlalchemy import desc, func
from flexget.manager import Session
from flexget.plugin import register_plugin, priority
from flexget.plugins.filter.series import SeriesDatabase, Series, Episode
from flexget.utils.tools import multiply_timedelta
log = logging.getLogger('est_series')
class EstimatesReleasedSeries(SeriesDatabase):
@priority(0) # Run only if better online lookups fail
def estimate(self, entry):
if all(field in entry for field in ['series_name', 'series_season', 'series_episode']):
session = Session()
series = session.query(Series).filter(Series.name == entry['series_name']).first()
if not series:
return
episodes = (session.query(Episode).join(Series).
filter(Episode.season != None).
filter(Series.id == series.id).
filter(Episode.season == func.max(Episode.season).select()).
order_by(desc(Episode.number)).limit(2).all())
if len(episodes) < 2:
return
last_diff = episodes[0].first_seen - episodes[1].first_seen
return episodes[0].first_seen + multiply_timedelta(last_diff, 0.9)
# TODO: Some fancier logic? Season break estimates?
register_plugin(EstimatesReleasedSeries, 'est_released_series', groups=['estimate_release'])
Raise DependencyError when series plugin is missing
|
from __future__ import unicode_literals, division, absolute_import
import logging
from sqlalchemy import desc, func
from flexget.manager import Session
from flexget.plugin import register_plugin, priority, DependencyError
from flexget.utils.tools import multiply_timedelta
try:
from flexget.plugins.filter.series import Series, Episode
except ImportError:
raise DependencyError(issued_by='est_released_series', missing='series plugin', silent=True)
log = logging.getLogger('est_series')
class EstimatesReleasedSeries(object):
@priority(0) # Run only if better online lookups fail
def estimate(self, entry):
if all(field in entry for field in ['series_name', 'series_season', 'series_episode']):
session = Session()
series = session.query(Series).filter(Series.name == entry['series_name']).first()
if not series:
return
episodes = (session.query(Episode).join(Series).
filter(Episode.season != None).
filter(Series.id == series.id).
filter(Episode.season == func.max(Episode.season).select()).
order_by(desc(Episode.number)).limit(2).all())
if len(episodes) < 2:
return
last_diff = episodes[0].first_seen - episodes[1].first_seen
return episodes[0].first_seen + multiply_timedelta(last_diff, 0.9)
# TODO: Some fancier logic? Season break estimates?
register_plugin(EstimatesReleasedSeries, 'est_released_series', groups=['estimate_release'])
|
<commit_before>from __future__ import unicode_literals, division, absolute_import
import logging
from sqlalchemy import desc, func
from flexget.manager import Session
from flexget.plugin import register_plugin, priority
from flexget.plugins.filter.series import SeriesDatabase, Series, Episode
from flexget.utils.tools import multiply_timedelta
log = logging.getLogger('est_series')
class EstimatesReleasedSeries(SeriesDatabase):
@priority(0) # Run only if better online lookups fail
def estimate(self, entry):
if all(field in entry for field in ['series_name', 'series_season', 'series_episode']):
session = Session()
series = session.query(Series).filter(Series.name == entry['series_name']).first()
if not series:
return
episodes = (session.query(Episode).join(Series).
filter(Episode.season != None).
filter(Series.id == series.id).
filter(Episode.season == func.max(Episode.season).select()).
order_by(desc(Episode.number)).limit(2).all())
if len(episodes) < 2:
return
last_diff = episodes[0].first_seen - episodes[1].first_seen
return episodes[0].first_seen + multiply_timedelta(last_diff, 0.9)
# TODO: Some fancier logic? Season break estimates?
register_plugin(EstimatesReleasedSeries, 'est_released_series', groups=['estimate_release'])
<commit_msg>Raise DependencyError when series plugin is missing<commit_after>
|
from __future__ import unicode_literals, division, absolute_import
import logging
from sqlalchemy import desc, func
from flexget.manager import Session
from flexget.plugin import register_plugin, priority, DependencyError
from flexget.utils.tools import multiply_timedelta
try:
from flexget.plugins.filter.series import Series, Episode
except ImportError:
raise DependencyError(issued_by='est_released_series', missing='series plugin', silent=True)
log = logging.getLogger('est_series')
class EstimatesReleasedSeries(object):
@priority(0) # Run only if better online lookups fail
def estimate(self, entry):
if all(field in entry for field in ['series_name', 'series_season', 'series_episode']):
session = Session()
series = session.query(Series).filter(Series.name == entry['series_name']).first()
if not series:
return
episodes = (session.query(Episode).join(Series).
filter(Episode.season != None).
filter(Series.id == series.id).
filter(Episode.season == func.max(Episode.season).select()).
order_by(desc(Episode.number)).limit(2).all())
if len(episodes) < 2:
return
last_diff = episodes[0].first_seen - episodes[1].first_seen
return episodes[0].first_seen + multiply_timedelta(last_diff, 0.9)
# TODO: Some fancier logic? Season break estimates?
register_plugin(EstimatesReleasedSeries, 'est_released_series', groups=['estimate_release'])
|
from __future__ import unicode_literals, division, absolute_import
import logging
from sqlalchemy import desc, func
from flexget.manager import Session
from flexget.plugin import register_plugin, priority
from flexget.plugins.filter.series import SeriesDatabase, Series, Episode
from flexget.utils.tools import multiply_timedelta
log = logging.getLogger('est_series')
class EstimatesReleasedSeries(SeriesDatabase):
@priority(0) # Run only if better online lookups fail
def estimate(self, entry):
if all(field in entry for field in ['series_name', 'series_season', 'series_episode']):
session = Session()
series = session.query(Series).filter(Series.name == entry['series_name']).first()
if not series:
return
episodes = (session.query(Episode).join(Series).
filter(Episode.season != None).
filter(Series.id == series.id).
filter(Episode.season == func.max(Episode.season).select()).
order_by(desc(Episode.number)).limit(2).all())
if len(episodes) < 2:
return
last_diff = episodes[0].first_seen - episodes[1].first_seen
return episodes[0].first_seen + multiply_timedelta(last_diff, 0.9)
# TODO: Some fancier logic? Season break estimates?
register_plugin(EstimatesReleasedSeries, 'est_released_series', groups=['estimate_release'])
Raise DependencyError when series plugin is missingfrom __future__ import unicode_literals, division, absolute_import
import logging
from sqlalchemy import desc, func
from flexget.manager import Session
from flexget.plugin import register_plugin, priority, DependencyError
from flexget.utils.tools import multiply_timedelta
try:
from flexget.plugins.filter.series import Series, Episode
except ImportError:
raise DependencyError(issued_by='est_released_series', missing='series plugin', silent=True)
log = logging.getLogger('est_series')
class EstimatesReleasedSeries(object):
@priority(0) # Run only if better online lookups fail
def estimate(self, entry):
if all(field in entry for field in ['series_name', 'series_season', 'series_episode']):
session = Session()
series = session.query(Series).filter(Series.name == entry['series_name']).first()
if not series:
return
episodes = (session.query(Episode).join(Series).
filter(Episode.season != None).
filter(Series.id == series.id).
filter(Episode.season == func.max(Episode.season).select()).
order_by(desc(Episode.number)).limit(2).all())
if len(episodes) < 2:
return
last_diff = episodes[0].first_seen - episodes[1].first_seen
return episodes[0].first_seen + multiply_timedelta(last_diff, 0.9)
# TODO: Some fancier logic? Season break estimates?
register_plugin(EstimatesReleasedSeries, 'est_released_series', groups=['estimate_release'])
|
<commit_before>from __future__ import unicode_literals, division, absolute_import
import logging
from sqlalchemy import desc, func
from flexget.manager import Session
from flexget.plugin import register_plugin, priority
from flexget.plugins.filter.series import SeriesDatabase, Series, Episode
from flexget.utils.tools import multiply_timedelta
log = logging.getLogger('est_series')
class EstimatesReleasedSeries(SeriesDatabase):
@priority(0) # Run only if better online lookups fail
def estimate(self, entry):
if all(field in entry for field in ['series_name', 'series_season', 'series_episode']):
session = Session()
series = session.query(Series).filter(Series.name == entry['series_name']).first()
if not series:
return
episodes = (session.query(Episode).join(Series).
filter(Episode.season != None).
filter(Series.id == series.id).
filter(Episode.season == func.max(Episode.season).select()).
order_by(desc(Episode.number)).limit(2).all())
if len(episodes) < 2:
return
last_diff = episodes[0].first_seen - episodes[1].first_seen
return episodes[0].first_seen + multiply_timedelta(last_diff, 0.9)
# TODO: Some fancier logic? Season break estimates?
register_plugin(EstimatesReleasedSeries, 'est_released_series', groups=['estimate_release'])
<commit_msg>Raise DependencyError when series plugin is missing<commit_after>from __future__ import unicode_literals, division, absolute_import
import logging
from sqlalchemy import desc, func
from flexget.manager import Session
from flexget.plugin import register_plugin, priority, DependencyError
from flexget.utils.tools import multiply_timedelta
try:
from flexget.plugins.filter.series import Series, Episode
except ImportError:
raise DependencyError(issued_by='est_released_series', missing='series plugin', silent=True)
log = logging.getLogger('est_series')
class EstimatesReleasedSeries(object):
@priority(0) # Run only if better online lookups fail
def estimate(self, entry):
if all(field in entry for field in ['series_name', 'series_season', 'series_episode']):
session = Session()
series = session.query(Series).filter(Series.name == entry['series_name']).first()
if not series:
return
episodes = (session.query(Episode).join(Series).
filter(Episode.season != None).
filter(Series.id == series.id).
filter(Episode.season == func.max(Episode.season).select()).
order_by(desc(Episode.number)).limit(2).all())
if len(episodes) < 2:
return
last_diff = episodes[0].first_seen - episodes[1].first_seen
return episodes[0].first_seen + multiply_timedelta(last_diff, 0.9)
# TODO: Some fancier logic? Season break estimates?
register_plugin(EstimatesReleasedSeries, 'est_released_series', groups=['estimate_release'])
|
04a8da721f246382967c4e834ffac8a54506ae94
|
test/test_text_utils.py
|
test/test_text_utils.py
|
"""
Tests for text_utils.py
part of https://github.com/evil-mad/plotink
"""
import unittest
from plotink import text_utils
# python -m unittest discover in top-level package dir
# pylint: disable=too-many-public-methods
class TextUtilsTestCase(unittest.TestCase):
"""
Unit tests for text_utils.py
"""
def test_format_hms(self):
""" test format_hms function """
self.assertEqual(text_utils.format_hms(3600),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, False),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, milliseconds=False),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, milliseconds=True),
'3.600 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890, True),
'12 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890, milliseconds=True),
'12 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890),
'3:25:46 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(1.23456),
'1.235 Seconds')
self.assertEqual(text_utils.format_hms(3.123456),
'3.123 Seconds')
self.assertEqual(text_utils.format_hms(65),
'1:05 (Minutes, seconds)')
self.assertEqual(text_utils.format_hms(3500),
'58:20 (Minutes, seconds)')
self.assertEqual(text_utils.format_hms(1.0456e7),
'2904:26:40 (Hours, minutes, seconds)')
def test_xml_escape(self):
""" test format_hms function """
        self.assertEqual(text_utils.xml_escape("Hello&Goodbye"),
                         'Hello&Goodbye')
        self.assertEqual(text_utils.xml_escape("<test>"),
                         '<test>')
        self.assertEqual(text_utils.xml_escape("The sun's out today"),
                         'The sun's out today')
        self.assertEqual(text_utils.xml_escape('"Lemon Pie"'),
                         '"Lemon Pie"')
|
Add unit tests for text_utils
|
Add unit tests for text_utils
|
Python
|
mit
|
evil-mad/plotink
|
Add unit tests for text_utils
|
"""
Tests for text_utils.py
part of https://github.com/evil-mad/plotink
"""
import unittest
from plotink import text_utils
# python -m unittest discover in top-level package dir
# pylint: disable=too-many-public-methods
class TextUtilsTestCase(unittest.TestCase):
"""
Unit tests for text_utils.py
"""
def test_format_hms(self):
""" test format_hms function """
self.assertEqual(text_utils.format_hms(3600),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, False),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, milliseconds=False),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, milliseconds=True),
'3.600 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890, True),
'12 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890, milliseconds=True),
'12 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890),
'3:25:46 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(1.23456),
'1.235 Seconds')
self.assertEqual(text_utils.format_hms(3.123456),
'3.123 Seconds')
self.assertEqual(text_utils.format_hms(65),
'1:05 (Minutes, seconds)')
self.assertEqual(text_utils.format_hms(3500),
'58:20 (Minutes, seconds)')
self.assertEqual(text_utils.format_hms(1.0456e7),
'2904:26:40 (Hours, minutes, seconds)')
def test_xml_escape(self):
""" test format_hms function """
        self.assertEqual(text_utils.xml_escape("Hello&Goodbye"),
                         'Hello&Goodbye')
        self.assertEqual(text_utils.xml_escape("<test>"),
                         '<test>')
        self.assertEqual(text_utils.xml_escape("The sun's out today"),
                         'The sun's out today')
        self.assertEqual(text_utils.xml_escape('"Lemon Pie"'),
                         '"Lemon Pie"')
|
<commit_before><commit_msg>Add unit tests for text_utils<commit_after>
|
"""
Tests for text_utils.py
part of https://github.com/evil-mad/plotink
"""
import unittest
from plotink import text_utils
# python -m unittest discover in top-level package dir
# pylint: disable=too-many-public-methods
class TextUtilsTestCase(unittest.TestCase):
"""
Unit tests for text_utils.py
"""
def test_format_hms(self):
""" test format_hms function """
self.assertEqual(text_utils.format_hms(3600),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, False),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, milliseconds=False),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, milliseconds=True),
'3.600 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890, True),
'12 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890, milliseconds=True),
'12 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890),
'3:25:46 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(1.23456),
'1.235 Seconds')
self.assertEqual(text_utils.format_hms(3.123456),
'3.123 Seconds')
self.assertEqual(text_utils.format_hms(65),
'1:05 (Minutes, seconds)')
self.assertEqual(text_utils.format_hms(3500),
'58:20 (Minutes, seconds)')
self.assertEqual(text_utils.format_hms(1.0456e7),
'2904:26:40 (Hours, minutes, seconds)')
def test_xml_escape(self):
""" test format_hms function """
        self.assertEqual(text_utils.xml_escape("Hello&Goodbye"),
                         'Hello&Goodbye')
        self.assertEqual(text_utils.xml_escape("<test>"),
                         '<test>')
        self.assertEqual(text_utils.xml_escape("The sun's out today"),
                         'The sun's out today')
        self.assertEqual(text_utils.xml_escape('"Lemon Pie"'),
                         '"Lemon Pie"')
|
Add unit tests for text_utils"""
Tests for text_utils.py
part of https://github.com/evil-mad/plotink
"""
import unittest
from plotink import text_utils
# python -m unittest discover in top-level package dir
# pylint: disable=too-many-public-methods
class TextUtilsTestCase(unittest.TestCase):
"""
Unit tests for text_utils.py
"""
def test_format_hms(self):
""" test format_hms function """
self.assertEqual(text_utils.format_hms(3600),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, False),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, milliseconds=False),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, milliseconds=True),
'3.600 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890, True),
'12 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890, milliseconds=True),
'12 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890),
'3:25:46 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(1.23456),
'1.235 Seconds')
self.assertEqual(text_utils.format_hms(3.123456),
'3.123 Seconds')
self.assertEqual(text_utils.format_hms(65),
'1:05 (Minutes, seconds)')
self.assertEqual(text_utils.format_hms(3500),
'58:20 (Minutes, seconds)')
self.assertEqual(text_utils.format_hms(1.0456e7),
'2904:26:40 (Hours, minutes, seconds)')
def test_xml_escape(self):
""" test xml_escape function """
self.assertEqual(text_utils.xml_escape("Hello&Goodbye"),
'Hello&Goodbye')
self.assertEqual(text_utils.xml_escape("<test>"),
'<test>')
self.assertEqual(text_utils.xml_escape("The sun's out today"),
'The sun's out today')
self.assertEqual(text_utils.xml_escape('"Lemon Pie"'),
'"Lemon Pie"')
|
<commit_before><commit_msg>Add unit tests for text_utils<commit_after>"""
Tests for text_utils.py
part of https://github.com/evil-mad/plotink
"""
import unittest
from plotink import text_utils
# python -m unittest discover in top-level package dir
# pylint: disable=too-many-public-methods
class TextUtilsTestCase(unittest.TestCase):
"""
Unit tests for text_utils.py
"""
def test_format_hms(self):
""" test format_hms function """
self.assertEqual(text_utils.format_hms(3600),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, False),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, milliseconds=False),
'1:00:00 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(3600.00, milliseconds=True),
'3.600 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890, True),
'12 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890, milliseconds=True),
'12 Seconds')
self.assertEqual(text_utils.format_hms(12345.67890),
'3:25:46 (Hours, minutes, seconds)')
self.assertEqual(text_utils.format_hms(1.23456),
'1.235 Seconds')
self.assertEqual(text_utils.format_hms(3.123456),
'3.123 Seconds')
self.assertEqual(text_utils.format_hms(65),
'1:05 (Minutes, seconds)')
self.assertEqual(text_utils.format_hms(3500),
'58:20 (Minutes, seconds)')
self.assertEqual(text_utils.format_hms(1.0456e7),
'2904:26:40 (Hours, minutes, seconds)')
def test_xml_escape(self):
""" test xml_escape function """
self.assertEqual(text_utils.xml_escape("Hello&Goodbye"),
'Hello&Goodbye')
self.assertEqual(text_utils.xml_escape("<test>"),
'<test>')
self.assertEqual(text_utils.xml_escape("The sun's out today"),
'The sun's out today')
self.assertEqual(text_utils.xml_escape('"Lemon Pie"'),
'"Lemon Pie"')
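For orientation only: the escaping behaviour exercised above can be reproduced with the standard library. The sketch below illustrates the expected mapping and is not plotink's actual xml_escape implementation.
from xml.sax.saxutils import escape

def xml_escape_sketch(text):
    # escape() handles &, < and > by default; the entity map adds the two
    # quote characters that the tests above also expect to be encoded.
    return escape(text, {"'": "'", '"': """})

assert xml_escape_sketch("Hello&Goodbye") == 'Hello&Goodbye'
assert xml_escape_sketch('"Lemon Pie"') == '"Lemon Pie"'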
|
|
7b0d2caf4b2a0ed5074d847166b0868a4a2d5d7c
|
scripts/split_indexed.py
|
scripts/split_indexed.py
|
import argparse
import os
import struct
import tqdm
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True)
parser.add_argument('--num-pieces', required=True, type=int)
args = parser.parse_args()
root, ext = os.path.splitext(args.input)
index = []
with open(args.input + '.index') as index_file:
while True:
offset = index_file.read(8)
if not offset:
break
offset, = struct.unpack('<Q', offset)
index.append(offset)
num_elems = len(index)
pieces_num_elems = [num_elems // args.num_pieces] * args.num_pieces
pieces_num_elems[0] += num_elems - sum(pieces_num_elems)
index.append(os.stat(args.input).st_size)
input_file = open(args.input)
index_offset = 0
for i, piece_num_elems in tqdm.tqdm(enumerate(pieces_num_elems)):
piece_name = '{}-{:03d}-of-{:03d}{}'.format(
root, i, args.num_pieces, ext)
piece_start = index[index_offset]
piece_end = index[index_offset + piece_num_elems]
piece_size = piece_end - piece_start
input_file.seek(piece_start)
with open(piece_name, 'w') as output_file:
total_written = 0
while total_written < piece_size:
chunk = input_file.read(
min(1024768, piece_size - total_written))
assert chunk, 'EOF reached unexpectedly'
output_file.write(chunk)
total_written += len(chunk)
piece_index = [
v - piece_start
for v in index[index_offset:index_offset + piece_num_elems]
]
with open(piece_name + '.index', 'w') as index_file:
for v in piece_index:
index_file.write(struct.pack('<Q', v))
index_offset += piece_num_elems
|
Add script to split indexed files
|
Add script to split indexed files
|
Python
|
apache-2.0
|
nearai/program_synthesis,nearai/program_synthesis
|
Add script to split indexed files
|
import argparse
import os
import struct
import tqdm
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True)
parser.add_argument('--num-pieces', required=True, type=int)
args = parser.parse_args()
root, ext = os.path.splitext(args.input)
index = []
with open(args.input + '.index') as index_file:
while True:
offset = index_file.read(8)
if not offset:
break
offset, = struct.unpack('<Q', offset)
index.append(offset)
num_elems = len(index)
pieces_num_elems = [num_elems // args.num_pieces] * args.num_pieces
pieces_num_elems[0] += num_elems - sum(pieces_num_elems)
index.append(os.stat(args.input).st_size)
input_file = open(args.input)
index_offset = 0
for i, piece_num_elems in tqdm.tqdm(enumerate(pieces_num_elems)):
piece_name = '{}-{:03d}-of-{:03d}{}'.format(
root, i, args.num_pieces, ext)
piece_start = index[index_offset]
piece_end = index[index_offset + piece_num_elems]
piece_size = piece_end - piece_start
input_file.seek(piece_start)
with open(piece_name, 'w') as output_file:
total_written = 0
while total_written < piece_size:
chunk = input_file.read(
min(1024768, piece_size - total_written))
assert chunk, 'EOF reached unexpectedly'
output_file.write(chunk)
total_written += len(chunk)
piece_index = [
v - piece_start
for v in index[index_offset:index_offset + piece_num_elems]
]
with open(piece_name + '.index', 'w') as index_file:
for v in piece_index:
index_file.write(struct.pack('<Q', v))
index_offset += piece_num_elems
|
<commit_before><commit_msg>Add script to split indexed files<commit_after>
|
import argparse
import os
import struct
import tqdm
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True)
parser.add_argument('--num-pieces', required=True, type=int)
args = parser.parse_args()
root, ext = os.path.splitext(args.input)
index = []
with open(args.input + '.index') as index_file:
while True:
offset = index_file.read(8)
if not offset:
break
offset, = struct.unpack('<Q', offset)
index.append(offset)
num_elems = len(index)
pieces_num_elems = [num_elems // args.num_pieces] * args.num_pieces
pieces_num_elems[0] += num_elems - sum(pieces_num_elems)
index.append(os.stat(args.input).st_size)
input_file = open(args.input)
index_offset = 0
for i, piece_num_elems in tqdm.tqdm(enumerate(pieces_num_elems)):
piece_name = '{}-{:03d}-of-{:03d}{}'.format(
root, i, args.num_pieces, ext)
piece_start = index[index_offset]
piece_end = index[index_offset + piece_num_elems]
piece_size = piece_end - piece_start
input_file.seek(piece_start)
with open(piece_name, 'w') as output_file:
total_written = 0
while total_written < piece_size:
chunk = input_file.read(
min(1024768, piece_size - total_written))
assert chunk, 'EOF reached unexpectedly'
output_file.write(chunk)
total_written += len(chunk)
piece_index = [
v - piece_start
for v in index[index_offset:index_offset + piece_num_elems]
]
with open(piece_name + '.index', 'w') as index_file:
for v in piece_index:
index_file.write(struct.pack('<Q', v))
index_offset += piece_num_elems
|
Add script to split indexed filesimport argparse
import os
import struct
import tqdm
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True)
parser.add_argument('--num-pieces', required=True, type=int)
args = parser.parse_args()
root, ext = os.path.splitext(args.input)
index = []
with open(args.input + '.index') as index_file:
while True:
offset = index_file.read(8)
if not offset:
break
offset, = struct.unpack('<Q', offset)
index.append(offset)
num_elems = len(index)
pieces_num_elems = [num_elems // args.num_pieces] * args.num_pieces
pieces_num_elems[0] += num_elems - sum(pieces_num_elems)
index.append(os.stat(args.input).st_size)
input_file = open(args.input)
index_offset = 0
for i, piece_num_elems in tqdm.tqdm(enumerate(pieces_num_elems)):
piece_name = '{}-{:03d}-of-{:03d}{}'.format(
root, i, args.num_pieces, ext)
piece_start = index[index_offset]
piece_end = index[index_offset + piece_num_elems]
piece_size = piece_end - piece_start
input_file.seek(piece_start)
with open(piece_name, 'w') as output_file:
total_written = 0
while total_written < piece_size:
chunk = input_file.read(
min(1024768, piece_size - total_written))
assert chunk, 'EOF reached unexpectedly'
output_file.write(chunk)
total_written += len(chunk)
piece_index = [
v - piece_start
for v in index[index_offset:index_offset + piece_num_elems]
]
with open(piece_name + '.index', 'w') as index_file:
for v in piece_index:
index_file.write(struct.pack('<Q', v))
index_offset += piece_num_elems
|
<commit_before><commit_msg>Add script to split indexed files<commit_after>import argparse
import os
import struct
import tqdm
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True)
parser.add_argument('--num-pieces', required=True, type=int)
args = parser.parse_args()
root, ext = os.path.splitext(args.input)
index = []
with open(args.input + '.index') as index_file:
while True:
offset = index_file.read(8)
if not offset:
break
offset, = struct.unpack('<Q', offset)
index.append(offset)
num_elems = len(index)
pieces_num_elems = [num_elems // args.num_pieces] * args.num_pieces
pieces_num_elems[0] += num_elems - sum(pieces_num_elems)
index.append(os.stat(args.input).st_size)
input_file = open(args.input)
index_offset = 0
for i, piece_num_elems in tqdm.tqdm(enumerate(pieces_num_elems)):
piece_name = '{}-{:03d}-of-{:03d}{}'.format(
root, i, args.num_pieces, ext)
piece_start = index[index_offset]
piece_end = index[index_offset + piece_num_elems]
piece_size = piece_end - piece_start
input_file.seek(piece_start)
with open(piece_name, 'w') as output_file:
total_written = 0
while total_written < piece_size:
chunk = input_file.read(
min(1024768, piece_size - total_written))
assert chunk, 'EOF reached unexpectedly'
output_file.write(chunk)
total_written += len(chunk)
piece_index = [
v - piece_start
for v in index[index_offset:index_offset + piece_num_elems]
]
with open(piece_name + '.index', 'w') as index_file:
for v in piece_index:
index_file.write(struct.pack('<Q', v))
index_offset += piece_num_elems
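Illustrative sketch (not part of the commit): the script above assumes a sidecar '<name>.index' file holding one little-endian uint64 ('<Q') byte offset per record. A matching pair of files could be produced like this; the file names and record contents are made up.
import struct

records = [b'first record\n', b'second record\n', b'third record\n']  # hypothetical data
offset = 0
with open('sample.txt', 'wb') as data_file, open('sample.txt.index', 'wb') as index_file:
    for record in records:
        index_file.write(struct.pack('<Q', offset))  # byte offset where this record starts
        data_file.write(record)
        offset += len(record)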
|
|
a42c4423748171c5611d82a101f3b70741ff75db
|
tests/test_increment.py
|
tests/test_increment.py
|
from gypsy import increment
def test_increment_basal_area_pl():
args = []
expected = None
result = increment.increment_basal_area_pl(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_aw():
args = []
expected = None
result = increment.increment_basal_area_aw(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_sb():
args = []
expected = None
result = increment.increment_basal_area_sb(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_sw():
args = []
expected = None
result = increment.increment_basal_area_sw(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
|
Add boilerplate for increment tests
|
Add boilerplate for increment tests
These would have been very helpful when introducing cython for increment
functions. It is a lot easier to debug at this fine grained level than
in wrapping functions.
|
Python
|
mit
|
tesera/pygypsy,tesera/pygypsy
|
Add boilerplate for increment tests
These would have been very helpful when introducing cython for increment
functions. It is a lot easier to debug at this fine grained level than
in wrapping functions.
|
from gypsy import increment
def test_increment_basal_area_pl():
args = []
expected = None
result = increment.increment_basal_area_pl(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_aw():
args = []
expected = None
result = increment.increment_basal_area_aw(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_sb():
args = []
expected = None
result = increment.increment_basal_area_sb(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_sw():
args = []
expected = None
result = increment.increment_basal_area_sw(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
|
<commit_before><commit_msg>Add boilerplate for increment tests
These would have been very helpful when introducing cython for increment
functions. It is a lot easier to debug at this fine grained level than
in wrapping functions.<commit_after>
|
from gypsy import increment
def test_increment_basal_area_pl():
args = []
expected = None
result = increment.increment_basal_area_pl(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_aw():
args = []
expected = None
result = increment.increment_basal_area_aw(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_sb():
args = []
expected = None
result = increment.increment_basal_area_sb(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_sw():
args = []
expected = None
result = increment.increment_basal_area_sw(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
|
Add boilerplate for increment tests
These would have been very helpful when introducing cython for increment
functions. It is a lot easier to debug at this fine grained level than
in wrapping functions.from gypsy import increment
def test_increment_basal_area_pl():
args = []
expected = None
result = increment.increment_basal_area_pl(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_aw():
args = []
expected = None
result = increment.increment_basal_area_aw(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_sb():
args = []
expected = None
result = increment.increment_basal_area_sb(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_sw():
args = []
expected = None
result = increment.increment_basal_area_sw(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
|
<commit_before><commit_msg>Add boilerplate for increment tests
These would have been very helpful when introducing cython for increment
functions. It is a lot easier to debug at this fine grained level than
in wrapping functions.<commit_after>from gypsy import increment
def test_increment_basal_area_pl():
args = []
expected = None
result = increment.increment_basal_area_pl(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_aw():
args = []
expected = None
result = increment.increment_basal_area_aw(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_sb():
args = []
expected = None
result = increment.increment_basal_area_sb(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
def test_increment_basal_area_sw():
args = []
expected = None
result = increment.increment_basal_area_sw(*args)
assert result == expected
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
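One way the boilerplate above could be filled in once reference values are available is sketched below. The argument list and the expected value are placeholders, not real pygypsy inputs or outputs.
import pytest
from gypsy import increment

def test_increment_basal_area_aw_known_value():
    args = [0.85, 1.2, 50.0, 12.0, 1500.0, 18.0]   # placeholder inputs
    expected = 0.123                                # placeholder reference value
    result = increment.increment_basal_area_aw(*args)
    assert result == pytest.approx(expected, rel=1e-3)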
|
|
885abeff9859320a59d3afeb297b8138f9c7fa51
|
tools/audio_in_cards.py
|
tools/audio_in_cards.py
|
#!/usr/bin/env python
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.com/
#
# Author: Todd Giles (todd@lightshowpi.com)
"""Trivial wrapper around alsaaudio.cards() for getting a list of your audio cards.
Helpful in determining the list of USB audio cards in advanced audio-in setups.
https://bitbucket.org/togiles/lightshowpi/wiki/Audio-In%20Mode
Sample usage:
python audio_in_cards.py
"""
import alsaaudio as aa
if __name__ == "__main__":
print(aa.cards())
|
Update some comments on audio-in mode, including links to new wiki page. Also add tool wrapper around alsaaudio.cards()
|
Update some comments on audio-in mode, including links to new wiki page. Also add tool wrapper around alsaaudio.cards()
|
Python
|
bsd-2-clause
|
Cerberus98/lightshowpi,Cerberus98/lightshowpi,bradowen2011/lightshowpi,wheeldog515/lightshowPi,bradowen2011/lightshowpi,wheeldog515/lightshowPi
|
Update some comments on audio-in mode, including links to new wiki page. Also add tool wrapper around alsaaudio.cards()
|
#!/usr/bin/env python
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.com/
#
# Author: Todd Giles (todd@lightshowpi.com)
"""Trivial wrapper around alsaaudio.cards() for getting a list of your audio cards.
Helpful in determining the list of USB audio cards in advanced audio-in setups.
https://bitbucket.org/togiles/lightshowpi/wiki/Audio-In%20Mode
Sample usage:
python audio_in_cards.py
"""
import alsaaudio as aa
if __name__ == "__main__":
print(aa.cards())
|
<commit_before><commit_msg>Update some comments on audio-in mode, including links to new wiki page. Also add tool wrapper around alsaaudio.cards()<commit_after>
|
#!/usr/bin/env python
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.com/
#
# Author: Todd Giles (todd@lightshowpi.com)
"""Trivial wrapper around alsaaudio.cards() for getting a list of your audio cards.
Helpful in determining the list of USB audio cards in advanced audio-in setups.
https://bitbucket.org/togiles/lightshowpi/wiki/Audio-In%20Mode
Sample usage:
python audio_in_cards.py
"""
import alsaaudio as aa
if __name__ == "__main__":
print(aa.cards())
|
Update some comments on audio-in mode, including links to new wiki page. Also add tool wrapper around alsaaudio.cards()#!/usr/bin/env python
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.com/
#
# Author: Todd Giles (todd@lightshowpi.com)
"""Trivial wrapper around alsaaudio.cards() for getting a list of your audio cards.
Helpful in determining the list of USB audio cards in advanced audio-in setups.
https://bitbucket.org/togiles/lightshowpi/wiki/Audio-In%20Mode
Sample usage:
python audio_in_cards.py
"""
import alsaaudio as aa
if __name__ == "__main__":
print(aa.cards())
|
<commit_before><commit_msg>Update some comments on audio-in mode, including links to new wiki page. Also add tool wrapper around alsaaudio.cards()<commit_after>#!/usr/bin/env python
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.com/
#
# Author: Todd Giles (todd@lightshowpi.com)
"""Trivial wrapper around alsaaudio.cards() for getting a list of your audio cards.
Helpful in determining the list of USB audio cards in advanced audio-in setups.
https://bitbucket.org/togiles/lightshowpi/wiki/Audio-In%20Mode
Sample usage:
python audio_in_cards.py
"""
import alsaaudio as aa
if __name__ == "__main__":
print(aa.cards())
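A slightly more informative variant is sketched below (illustrative only, not part of the commit): it prints each card with its index, which may make it easier to pick the right USB card for an audio-in setup.
import alsaaudio as aa

if __name__ == "__main__":
    for index, card in enumerate(aa.cards()):
        print("card %d: %s" % (index, card))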
|
|
eada7c5b0a82f870fa4a0850481b28bc34eb25bd
|
ms2ldaviz/debug_motifdb.py
|
ms2ldaviz/debug_motifdb.py
|
import requests
server_url = 'http://ms2lda.org/motifdb/'
# server_url = 'http://localhost:8000/motifdb/'
"""Grabbing the latest Motifs from MS2LDA"""
motifset_dict = requests.get(server_url + 'list_motifsets/', verify=False).json()
db_list = []
db_list.append(2)
db_list.append(4)
db_list.append(1)
db_list.append(3)
db_list.append(5)
db_list.append(6)
db_list.append(16)
# Acquire motifset from MS2LDA.org
client = requests.session()
token_output = client.get(server_url + 'initialise_api/', verify=False).json()
token = token_output['token']
data = {'csrfmiddlewaretoken': token}
data['motifset_id_list'] = db_list
data['filter'] = 'True'
response = client.post(server_url + 'get_motifset/', data=data, verify=False)
json_output = response.json()
assert response.status_code == 200
assert len(json_output['motifs']) > 0
assert len(json_output['metadata']) > 0
print('Success!')
|
Add a motifdb debug script
|
Add a motifdb debug script
|
Python
|
mit
|
sdrogers/ms2ldaviz,sdrogers/ms2ldaviz,sdrogers/ms2ldaviz,sdrogers/ms2ldaviz
|
Add a motifdb debug script
|
import requests
server_url = 'http://ms2lda.org/motifdb/'
# server_url = 'http://localhost:8000/motifdb/'
"""Grabbing the latest Motifs from MS2LDA"""
motifset_dict = requests.get(server_url + 'list_motifsets/', verify=False).json()
db_list = []
db_list.append(2)
db_list.append(4)
db_list.append(1)
db_list.append(3)
db_list.append(5)
db_list.append(6)
db_list.append(16)
# Acquire motifset from MS2LDA.org
client = requests.session()
token_output = client.get(server_url + 'initialise_api/', verify=False).json()
token = token_output['token']
data = {'csrfmiddlewaretoken': token}
data['motifset_id_list'] = db_list
data['filter'] = 'True'
response = client.post(server_url + 'get_motifset/', data=data, verify=False)
json_output = response.json()
assert response.status_code == 200
assert len(json_output['motifs']) > 0
assert len(json_output['metadata']) > 0
print('Success!')
|
<commit_before><commit_msg>Add a motifdb debug script<commit_after>
|
import requests
server_url = 'http://ms2lda.org/motifdb/'
# server_url = 'http://localhost:8000/motifdb/'
"""Grabbing the latest Motifs from MS2LDA"""
motifset_dict = requests.get(server_url + 'list_motifsets/', verify=False).json()
db_list = []
db_list.append(2)
db_list.append(4)
db_list.append(1)
db_list.append(3)
db_list.append(5)
db_list.append(6)
db_list.append(16)
# Acquire motifset from MS2LDA.org
client = requests.session()
token_output = client.get(server_url + 'initialise_api/', verify=False).json()
token = token_output['token']
data = {'csrfmiddlewaretoken': token}
data['motifset_id_list'] = db_list
data['filter'] = 'True'
response = client.post(server_url + 'get_motifset/', data=data, verify=False)
json_output = response.json()
assert response.status_code == 200
assert len(json_output['motifs']) > 0
assert len(json_output['metadata']) > 0
print('Success!')
|
Add a motifdb debug scriptimport requests
server_url = 'http://ms2lda.org/motifdb/'
# server_url = 'http://localhost:8000/motifdb/'
"""Grabbing the latest Motifs from MS2LDA"""
motifset_dict = requests.get(server_url + 'list_motifsets/', verify=False).json()
db_list = []
db_list.append(2)
db_list.append(4)
db_list.append(1)
db_list.append(3)
db_list.append(5)
db_list.append(6)
db_list.append(16)
# Acquire motifset from MS2LDA.org
client = requests.session()
token_output = client.get(server_url + 'initialise_api/', verify=False).json()
token = token_output['token']
data = {'csrfmiddlewaretoken': token}
data['motifset_id_list'] = db_list
data['filter'] = 'True'
response = client.post(server_url + 'get_motifset/', data=data, verify=False)
json_output = response.json()
assert response.status_code == 200
assert len(json_output['motifs']) > 0
assert len(json_output['metadata']) > 0
print('Success!')
|
<commit_before><commit_msg>Add a motifdb debug script<commit_after>import requests
server_url = 'http://ms2lda.org/motifdb/'
# server_url = 'http://localhost:8000/motifdb/'
"""Grabbing the latest Motifs from MS2LDA"""
motifset_dict = requests.get(server_url + 'list_motifsets/', verify=False).json()
db_list = []
db_list.append(2)
db_list.append(4)
db_list.append(1)
db_list.append(3)
db_list.append(5)
db_list.append(6)
db_list.append(16)
# Acquire motifset from MS2LDA.org
client = requests.session()
token_output = client.get(server_url + 'initialise_api/', verify=False).json()
token = token_output['token']
data = {'csrfmiddlewaretoken': token}
data['motifset_id_list'] = db_list
data['filter'] = 'True'
response = client.post(server_url + 'get_motifset/', data=data, verify=False)
json_output = response.json()
assert response.status_code == 200
assert len(json_output['motifs']) > 0
assert len(json_output['metadata']) > 0
print('Success!')
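Optional follow-up, illustrative only: summarise what came back instead of just asserting. Only the 'motifs' and 'metadata' keys are known from the assertions above; anything beyond their lengths is an assumption.
print('Fetched %d motifs and %d metadata entries from %d motif sets'
      % (len(json_output['motifs']), len(json_output['metadata']), len(db_list)))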
|
|
012ea594d171041a9ff064783b7c0a392a827d4c
|
spacy/tests/test_misc.py
|
spacy/tests/test_misc.py
|
# coding: utf-8
from __future__ import unicode_literals
from ..util import ensure_path
from pathlib import Path
import pytest
@pytest.mark.parametrize('text', ['hello/world', 'hello world'])
def test_util_ensure_path_succeeds(text):
path = ensure_path(text)
assert isinstance(path, Path)
@pytest.mark.parametrize('text', [b'hello/world', True, False, None])
def test_util_ensure_path_fails(text):
path = ensure_path(text)
assert not isinstance(path, Path)
|
Add file for misc tests
|
Add file for misc tests
|
Python
|
mit
|
honnibal/spaCy,explosion/spaCy,aikramer2/spaCy,recognai/spaCy,raphael0202/spaCy,aikramer2/spaCy,raphael0202/spaCy,spacy-io/spaCy,aikramer2/spaCy,explosion/spaCy,honnibal/spaCy,recognai/spaCy,spacy-io/spaCy,spacy-io/spaCy,recognai/spaCy,honnibal/spaCy,aikramer2/spaCy,explosion/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,spacy-io/spaCy,raphael0202/spaCy,explosion/spaCy,recognai/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,raphael0202/spaCy,aikramer2/spaCy,honnibal/spaCy,spacy-io/spaCy,raphael0202/spaCy,recognai/spaCy,spacy-io/spaCy,explosion/spaCy,aikramer2/spaCy,Gregory-Howard/spaCy,recognai/spaCy,raphael0202/spaCy,explosion/spaCy
|
Add file for misc tests
|
# coding: utf-8
from __future__ import unicode_literals
from ..util import ensure_path
from pathlib import Path
import pytest
@pytest.mark.parametrize('text', ['hello/world', 'hello world'])
def test_util_ensure_path_succeeds(text):
path = ensure_path(text)
assert isinstance(path, Path)
@pytest.mark.parametrize('text', [b'hello/world', True, False, None])
def test_util_ensure_path_fails(text):
path = ensure_path(text)
assert not isinstance(path, Path)
|
<commit_before><commit_msg>Add file for misc tests<commit_after>
|
# coding: utf-8
from __future__ import unicode_literals
from ..util import ensure_path
from pathlib import Path
import pytest
@pytest.mark.parametrize('text', ['hello/world', 'hello world'])
def test_util_ensure_path_succeeds(text):
path = ensure_path(text)
assert isinstance(path, Path)
@pytest.mark.parametrize('text', [b'hello/world', True, False, None])
def test_util_ensure_path_fails(text):
path = ensure_path(text)
assert not isinstance(path, Path)
|
Add file for misc tests# coding: utf-8
from __future__ import unicode_literals
from ..util import ensure_path
from pathlib import Path
import pytest
@pytest.mark.parametrize('text', ['hello/world', 'hello world'])
def test_util_ensure_path_succeeds(text):
path = ensure_path(text)
assert isinstance(path, Path)
@pytest.mark.parametrize('text', [b'hello/world', True, False, None])
def test_util_ensure_path_fails(text):
path = ensure_path(text)
assert not isinstance(path, Path)
|
<commit_before><commit_msg>Add file for misc tests<commit_after># coding: utf-8
from __future__ import unicode_literals
from ..util import ensure_path
from pathlib import Path
import pytest
@pytest.mark.parametrize('text', ['hello/world', 'hello world'])
def test_util_ensure_path_succeeds(text):
path = ensure_path(text)
assert isinstance(path, Path)
@pytest.mark.parametrize('text', [b'hello/world', True, False, None])
def test_util_ensure_path_fails(text):
path = ensure_path(text)
assert not isinstance(path, Path)
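For context, a sketch consistent with the expectations above (not spaCy's actual implementation): plain strings are coerced to pathlib.Path, while bytes, booleans and None are returned unchanged.
from pathlib import Path

def ensure_path_sketch(path):
    # Coerce text paths to Path objects; pass everything else through untouched.
    if isinstance(path, str):
        return Path(path)
    return path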
|
|
5ecaed42c8f4389a6d12851d41c22dad22e2a2d8
|
storm/src/py/resources/morelikethis.py
|
storm/src/py/resources/morelikethis.py
|
# -*- coding: utf-8 -*-
"""
zeit.recommend.morelikethis
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module has no description.
Copyright: (c) 2013 by Nicolas Drebenstedt.
License: BSD, see LICENSE for more details.
"""
from elasticsearch import Elasticsearch
from storm import Bolt
from storm import log
from storm import emit
import json
import urllib2
import time
class MorelikethisBolt(Bolt):
connections = {}
def initialize(self, conf, context):
host = conf.get('zeit.recommend.zonapi.host', 'localhost')
port = conf.get('zeit.recommend.zonapi.port', 8983)
self.url = 'http://%s:%s/solr/mlt?fl=href' % (host, port)
def recommend(self, paths):
aggregate = []
for path in paths:
# TODO: Retrieve article bodies here.
aggregate.append('')
body = ' '.join(aggregate)
req = urllib2.Request(self.url, body, {'Content-Type':'text/plain'})
raw = urllib2.urlopen(req)
data = raw.read()
response = json.loads(data)['response']
return list(i['href'][18:] for i in response['docs'])
def process(self, tup):
if tup.stream == 'control':
action, user = tup.values
if action == 'connect':
self.connections[user] = int(time.time())
elif action == 'disconnect':
del self.connections[user]
elif tup.stream == 'default':
user, paths = tup.values
if user in self.connections:
log('[MorelikethisBolt] Incoming: %s' % user)
recommendations = self.recommend(paths)
paths = list(set(paths))[:10]
emit([user, paths, recommendations])
if __name__ == '__main__':
MorelikethisBolt().run()
|
Add barebone content-based, Solr-powered recommender
|
Add barebone content-based, Solr-powered recommender
|
Python
|
bsd-2-clause
|
cutoffthetop/recommender,cutoffthetop/recommender,cutoffthetop/recommender
|
Add barebone content-based, Solr-powered recommender
|
# -*- coding: utf-8 -*-
"""
zeit.recommend.morelikethis
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module has no description.
Copyright: (c) 2013 by Nicolas Drebenstedt.
License: BSD, see LICENSE for more details.
"""
from elasticsearch import Elasticsearch
from storm import Bolt
from storm import log
from storm import emit
import json
import urllib2
import time
class MorelikethisBolt(Bolt):
connections = {}
def initialize(self, conf, context):
host = conf.get('zeit.recommend.zonapi.host', 'localhost')
port = conf.get('zeit.recommend.zonapi.port', 8983)
self.url = 'http://%s:%s/solr/mlt?fl=href' % (host, port)
def recommend(self, paths):
aggregate = []
for path in paths:
# TODO: Retrieve article bodies here.
aggregate.append('')
body = ' '.join(aggregate)
req = urllib2.Request(self.url, body, {'Content-Type':'text/plain'})
raw = urllib2.urlopen(req)
data = raw.read()
response = json.loads(data)['response']
return list(i['href'][18:] for i in response['docs'])
def process(self, tup):
if tup.stream == 'control':
action, user = tup.values
if action == 'connect':
self.connections[user] = int(time.time())
elif action == 'disconnect':
del self.connections[user]
elif tup.stream == 'default':
user, paths = tup.values
if user in self.connections:
log('[MorelikethisBolt] Incoming: %s' % user)
recommendations = self.recommend(paths)
paths = list(set(paths))[:10]
emit([user, paths, recommendations])
if __name__ == '__main__':
MorelikethisBolt().run()
|
<commit_before><commit_msg>Add barebone content-based, Solr-powered recommender<commit_after>
|
# -*- coding: utf-8 -*-
"""
zeit.recommend.morelikethis
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module has no description.
Copyright: (c) 2013 by Nicolas Drebenstedt.
License: BSD, see LICENSE for more details.
"""
from elasticsearch import Elasticsearch
from storm import Bolt
from storm import log
from storm import emit
import json
import urllib2
import time
class MorelikethisBolt(Bolt):
connections = {}
def initialize(self, conf, context):
host = conf.get('zeit.recommend.zonapi.host', 'localhost')
port = conf.get('zeit.recommend.zonapi.port', 8983)
self.url = 'http://%s:%s/solr/mlt?fl=href' % (host, port)
def recommend(self, paths):
aggregate = []
for path in paths:
# TODO: Retrieve article bodies here.
aggregate.append('')
body = ' '.join(aggregate)
req = urllib2.Request(self.url, body, {'Content-Type':'text/plain'})
raw = urllib2.urlopen(req)
data = raw.read()
response = json.loads(data)['response']
return list(i['href'][18:] for i in response['docs'])
def process(self, tup):
if tup.stream == 'control':
action, user = tup.values
if action == 'connect':
self.connections[user] = int(time.time())
elif action == 'disconnect':
del self.connections[user]
elif tup.stream == 'default':
user, paths = tup.values
if user in self.connections:
log('[MorelikethisBolt] Incoming: %s' % user)
recommendations = self.recommend(paths)
paths = list(set(paths))[:10]
emit([user, paths, recommendations])
if __name__ == '__main__':
MorelikethisBolt().run()
|
Add barebone content-based, Solr-powered recommender# -*- coding: utf-8 -*-
"""
zeit.recommend.morelikethis
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module has no description.
Copyright: (c) 2013 by Nicolas Drebenstedt.
License: BSD, see LICENSE for more details.
"""
from elasticsearch import Elasticsearch
from storm import Bolt
from storm import log
from storm import emit
import json
import urllib2
import time
class MorelikethisBolt(Bolt):
connections = {}
def initialize(self, conf, context):
host = conf.get('zeit.recommend.zonapi.host', 'localhost')
port = conf.get('zeit.recommend.zonapi.port', 8983)
self.url = 'http://%s:%s/solr/mlt?fl=href' % (host, port)
def recommend(self, paths):
aggregate = []
for path in paths:
# TODO: Retrieve article bodies here.
aggregate.append('')
body = ' '.join(aggregate)
req = urllib2.Request(self.url, body, {'Content-Type':'text/plain'})
raw = urllib2.urlopen(req)
data = raw.read()
response = json.loads(data)['response']
return list(i['href'][18:] for i in response['docs'])
def process(self, tup):
if tup.stream == 'control':
action, user = tup.values
if action == 'connect':
self.connections[user] = int(time.time())
elif action == 'disconnect':
del self.connections[user]
elif tup.stream == 'default':
user, paths = tup.values
if user in self.connections:
log('[MorelikethisBolt] Incoming: %s' % user)
recommendations = self.recommend(paths)
paths = list(set(paths))[:10]
emit([user, paths, recommendations])
if __name__ == '__main__':
MorelikethisBolt().run()
|
<commit_before><commit_msg>Add barebone content-based, Solr-powered recommender<commit_after># -*- coding: utf-8 -*-
"""
zeit.recommend.morelikethis
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module has no description.
Copyright: (c) 2013 by Nicolas Drebenstedt.
License: BSD, see LICENSE for more details.
"""
from elasticsearch import Elasticsearch
from storm import Bolt
from storm import log
from storm import emit
import json
import urllib2
import time
class MorelikethisBolt(Bolt):
connections = {}
def initialize(self, conf, context):
host = conf.get('zeit.recommend.zonapi.host', 'localhost')
port = conf.get('zeit.recommend.zonapi.port', 8983)
self.url = 'http://%s:%s/solr/mlt?fl=href' % (host, port)
def recommend(self, paths):
aggregate = []
for path in paths:
# TODO: Retrieve article bodies here.
aggregate.append('')
body = ' '.join(aggregate)
req = urllib2.Request(self.url, body, {'Content-Type':'text/plain'})
raw = urllib2.urlopen(req)
data = raw.read()
response = json.loads(data)['response']
return list(i['href'][18:] for i in response['docs'])
def process(self, tup):
if tup.stream == 'control':
action, user = tup.values
if action == 'connect':
self.connections[user] = int(time.time())
elif action == 'disconnect':
del self.connections[user]
elif tup.stream == 'default':
user, paths = tup.values
if user in self.connections:
log('[MorelikethisBolt] Incoming: %s' % user)
recommendations = self.recommend(paths)
paths = list(set(paths))[:10]
emit([user, paths, recommendations])
if __name__ == '__main__':
MorelikethisBolt().run()
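Illustrative only: the response shape that recommend() above relies on. The hrefs are made up; only the 'response' -> 'docs' -> 'href' structure and the 18-character prefix strip come from the code.
import json

sample = json.loads('{"response": {"docs": ['
                    '{"href": "http://www.zeit.de/politik/some-article"},'
                    '{"href": "http://www.zeit.de/wissen/another-article"}]}}')
print([doc['href'][18:] for doc in sample['response']['docs']])
# -> ['/politik/some-article', '/wissen/another-article']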
|
|
3174797eea1bd7ae131a355ab020ad26274b379a
|
vispy/scene/cameras/tests/test_link.py
|
vispy/scene/cameras/tests/test_link.py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from vispy.scene.widgets import ViewBox
from vispy.testing import run_tests_if_main
def test_turntable_camera_link():
vbs = [ViewBox(camera='turntable') for _ in range(3)]
cams = [vb.camera for vb in vbs]
for cam in cams:
cam.elevation = 45.0
cam.azimuth = 120.0
cam.scale_factor = 4.0
cams[0].link(cams[1])
cams[0].link(cams[2], props=['azimuth', 'elevation'])
cams[1].elevation = 30.0
cams[1].azimuth = 90.0
cams[1].scale_factor = 2.0
assert cams[0].elevation == 30.0
assert cams[0].azimuth == 90.0
assert cams[0].scale_factor == 2.0
assert cams[2].elevation == 30.0
assert cams[2].azimuth == 90.0
assert cams[2].scale_factor == 4.0
def test_panzoom_link():
vbs = [ViewBox(camera='panzoom') for _ in range(4)]
cams = [vb.camera for vb in vbs]
for cam in cams:
cam.rect = (0, 0, 100, 100)
cams[0].link(cams[1])
cams[0].link(cams[2], axis='x')
cams[0].link(cams[3], axis='y')
cams[1].rect = (-20, -20, 130, 130)
assert cams[0].rect.pos == (-20, -20) and cams[0].rect.size == (130, 130)
assert cams[2].rect.pos == (-20, 0) and cams[2].rect.size == (130, 100)
assert cams[3].rect.pos == (0, -20) and cams[3].rect.size == (100, 130)
run_tests_if_main()
|
Add tests for camera linking
|
Add tests for camera linking
|
Python
|
bsd-3-clause
|
Eric89GXL/vispy,Eric89GXL/vispy,Eric89GXL/vispy
|
Add tests for camera linking
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from vispy.scene.widgets import ViewBox
from vispy.testing import run_tests_if_main
def test_turntable_camera_link():
vbs = [ViewBox(camera='turntable') for _ in range(3)]
cams = [vb.camera for vb in vbs]
for cam in cams:
cam.elevation = 45.0
cam.azimuth = 120.0
cam.scale_factor = 4.0
cams[0].link(cams[1])
cams[0].link(cams[2], props=['azimuth', 'elevation'])
cams[1].elevation = 30.0
cams[1].azimuth = 90.0
cams[1].scale_factor = 2.0
assert cams[0].elevation == 30.0
assert cams[0].azimuth == 90.0
assert cams[0].scale_factor == 2.0
assert cams[2].elevation == 30.0
assert cams[2].azimuth == 90.0
assert cams[2].scale_factor == 4.0
def test_panzoom_link():
vbs = [ViewBox(camera='panzoom') for _ in range(4)]
cams = [vb.camera for vb in vbs]
for cam in cams:
cam.rect = (0, 0, 100, 100)
cams[0].link(cams[1])
cams[0].link(cams[2], axis='x')
cams[0].link(cams[3], axis='y')
cams[1].rect = (-20, -20, 130, 130)
assert cams[0].rect.pos == (-20, -20) and cams[0].rect.size == (130, 130)
assert cams[2].rect.pos == (-20, 0) and cams[2].rect.size == (130, 100)
assert cams[3].rect.pos == (0, -20) and cams[3].rect.size == (100, 130)
run_tests_if_main()
|
<commit_before><commit_msg>Add tests for camera linking<commit_after>
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from vispy.scene.widgets import ViewBox
from vispy.testing import run_tests_if_main
def test_turntable_camera_link():
vbs = [ViewBox(camera='turntable') for _ in range(3)]
cams = [vb.camera for vb in vbs]
for cam in cams:
cam.elevation = 45.0
cam.azimuth = 120.0
cam.scale_factor = 4.0
cams[0].link(cams[1])
cams[0].link(cams[2], props=['azimuth', 'elevation'])
cams[1].elevation = 30.0
cams[1].azimuth = 90.0
cams[1].scale_factor = 2.0
assert cams[0].elevation == 30.0
assert cams[0].azimuth == 90.0
assert cams[0].scale_factor == 2.0
assert cams[2].elevation == 30.0
assert cams[2].azimuth == 90.0
assert cams[2].scale_factor == 4.0
def test_panzoom_link():
vbs = [ViewBox(camera='panzoom') for _ in range(4)]
cams = [vb.camera for vb in vbs]
for cam in cams:
cam.rect = (0, 0, 100, 100)
cams[0].link(cams[1])
cams[0].link(cams[2], axis='x')
cams[0].link(cams[3], axis='y')
cams[1].rect = (-20, -20, 130, 130)
assert cams[0].rect.pos == (-20, -20) and cams[0].rect.size == (130, 130)
assert cams[2].rect.pos == (-20, 0) and cams[2].rect.size == (130, 100)
assert cams[3].rect.pos == (0, -20) and cams[3].rect.size == (100, 130)
run_tests_if_main()
|
Add tests for camera linking# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from vispy.scene.widgets import ViewBox
from vispy.testing import run_tests_if_main
def test_turntable_camera_link():
vbs = [ViewBox(camera='turntable') for _ in range(3)]
cams = [vb.camera for vb in vbs]
for cam in cams:
cam.elevation = 45.0
cam.azimuth = 120.0
cam.scale_factor = 4.0
cams[0].link(cams[1])
cams[0].link(cams[2], props=['azimuth', 'elevation'])
cams[1].elevation = 30.0
cams[1].azimuth = 90.0
cams[1].scale_factor = 2.0
assert cams[0].elevation == 30.0
assert cams[0].azimuth == 90.0
assert cams[0].scale_factor == 2.0
assert cams[2].elevation == 30.0
assert cams[2].azimuth == 90.0
assert cams[2].scale_factor == 4.0
def test_panzoom_link():
vbs = [ViewBox(camera='panzoom') for _ in range(4)]
cams = [vb.camera for vb in vbs]
for cam in cams:
cam.rect = (0, 0, 100, 100)
cams[0].link(cams[1])
cams[0].link(cams[2], axis='x')
cams[0].link(cams[3], axis='y')
cams[1].rect = (-20, -20, 130, 130)
assert cams[0].rect.pos == (-20, -20) and cams[0].rect.size == (130, 130)
assert cams[2].rect.pos == (-20, 0) and cams[2].rect.size == (130, 100)
assert cams[3].rect.pos == (0, -20) and cams[3].rect.size == (100, 130)
run_tests_if_main()
|
<commit_before><commit_msg>Add tests for camera linking<commit_after># -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
from vispy.scene.widgets import ViewBox
from vispy.testing import run_tests_if_main
def test_turntable_camera_link():
vbs = [ViewBox(camera='turntable') for _ in range(3)]
cams = [vb.camera for vb in vbs]
for cam in cams:
cam.elevation = 45.0
cam.azimuth = 120.0
cam.scale_factor = 4.0
cams[0].link(cams[1])
cams[0].link(cams[2], props=['azimuth', 'elevation'])
cams[1].elevation = 30.0
cams[1].azimuth = 90.0
cams[1].scale_factor = 2.0
assert cams[0].elevation == 30.0
assert cams[0].azimuth == 90.0
assert cams[0].scale_factor == 2.0
assert cams[2].elevation == 30.0
assert cams[2].azimuth == 90.0
assert cams[2].scale_factor == 4.0
def test_panzoom_link():
vbs = [ViewBox(camera='panzoom') for _ in range(4)]
cams = [vb.camera for vb in vbs]
for cam in cams:
cam.rect = (0, 0, 100, 100)
cams[0].link(cams[1])
cams[0].link(cams[2], axis='x')
cams[0].link(cams[3], axis='y')
cams[1].rect = (-20, -20, 130, 130)
assert cams[0].rect.pos == (-20, -20) and cams[0].rect.size == (130, 130)
assert cams[2].rect.pos == (-20, 0) and cams[2].rect.size == (130, 100)
assert cams[3].rect.pos == (0, -20) and cams[3].rect.size == (100, 130)
run_tests_if_main()
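Usage sketch, assuming the public vispy.scene API (not part of the test suite): two side-by-side views whose panzoom cameras stay in sync via camera.link().
from vispy import scene

canvas = scene.SceneCanvas(keys='interactive', show=True)
grid = canvas.central_widget.add_grid()
left = grid.add_view(row=0, col=0, camera='panzoom')
right = grid.add_view(row=0, col=1, camera='panzoom')
left.camera.link(right.camera)               # pan/zoom in either view moves both
# left.camera.link(right.camera, axis='x')   # or link only the x axis, as tested above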
|
|
a2ff7e03da07f75b4ce8e67e9e2ef065f45e3338
|
messenger/make_octave.py
|
messenger/make_octave.py
|
#!/usr/bin/python
from __future__ import print_function
import os
import sys
import shutil
import subprocess
# Check the system platform first
platform = sys.platform
print("This is a " + platform + " system")
if platform.startswith('linux'):
messenger_dir = 'mexa64'
elif platform.startswith('darwin'):
messenger_dir = 'mexmaci64'
elif platform.startswith('win32'):
# We further need to differniate 32 from 64 bit:
maxint = sys.maxsize
if maxint == 9223372036854775807:
messenger_dir = 'mexw64'
elif maxint == 2147483647:
messenger_dir = 'mexw32'
with open(os.path.join(messenger_dir, 'local_octave.cfg')) as fid:
lines = fid.readlines()
cfg = {}
for line in lines:
name, path = line.split('=')
cfg[name.lower()] = path
print("Building messenger.oct...")
paths = "-L%(octave_lib)s -I%(octave_inc)s -L%(zmq_lib)s -I%(zmq_inc)s" % cfg
make_cmd = "mkoctfile %s -lzmq ./src/messenger.c" % paths
print(make_cmd)
subprocess.check_output(make_cmd.split())
messenger_exe = 'messenger.oct'
messenger_loc = os.path.join(messenger_dir, messenger_exe)
shutil.move(messenger_exe, messenger_loc)
os.remove('messenger.o')
|
Add an octave build script
|
Add an octave build script
|
Python
|
bsd-3-clause
|
jjangsangy/python-matlab-bridge,jjangsangy/python-matlab-bridge,arokem/python-matlab-bridge,arokem/python-matlab-bridge,blink1073/python-matlab-bridge,arokem/python-matlab-bridge,blink1073/python-matlab-bridge,jjangsangy/python-matlab-bridge,blink1073/python-matlab-bridge
|
Add an octave build script
|
#!/usr/bin/python
from __future__ import print_function
import os
import sys
import shutil
import subprocess
# Check the system platform first
platform = sys.platform
print("This is a " + platform + " system")
if platform.startswith('linux'):
messenger_dir = 'mexa64'
elif platform.startswith('darwin'):
messenger_dir = 'mexmaci64'
elif platform.startswith('win32'):
# We further need to differniate 32 from 64 bit:
maxint = sys.maxsize
if maxint == 9223372036854775807:
messenger_dir = 'mexw64'
elif maxint == 2147483647:
messenger_dir = 'mexw32'
with open(os.path.join(messenger_dir, 'local_octave.cfg')) as fid:
lines = fid.readlines()
cfg = {}
for line in lines:
name, path = line.split('=')
cfg[name.lower()] = path
print("Building messenger.oct...")
paths = "-L%(octave_lib)s -I%(octave_inc)s -L%(zmq_lib)s -I%(zmq_inc)s" % cfg
make_cmd = "mkoctfile %s -lzmq ./src/messenger.c" % paths
print(make_cmd)
subprocess.check_output(make_cmd.split())
messenger_exe = 'messenger.oct'
messenger_loc = os.path.join(messenger_dir, messenger_exe)
shutil.move(messenger_exe, messenger_loc)
os.remove('messenger.o')
|
<commit_before><commit_msg>Add an octave build script<commit_after>
|
#!/usr/bin/python
from __future__ import print_function
import os
import sys
import shutil
import subprocess
# Check the system platform first
platform = sys.platform
print("This is a " + platform + " system")
if platform.startswith('linux'):
messenger_dir = 'mexa64'
elif platform.startswith('darwin'):
messenger_dir = 'mexmaci64'
elif platform.startswith('win32'):
# We further need to differniate 32 from 64 bit:
maxint = sys.maxsize
if maxint == 9223372036854775807:
messenger_dir = 'mexw64'
elif maxint == 2147483647:
messenger_dir = 'mexw32'
with open(os.path.join(messenger_dir, 'local_octave.cfg')) as fid:
lines = fid.readlines()
cfg = {}
for line in lines:
name, path = line.split('=')
cfg[name.lower()] = path
print("Building messenger.oct...")
paths = "-L%(octave_lib)s -I%(octave_inc)s -L%(zmq_lib)s -I%(zmq_inc)s" % cfg
make_cmd = "mkoctfile %s -lzmq ./src/messenger.c" % paths
print(make_cmd)
subprocess.check_output(make_cmd.split())
messenger_exe = 'messenger.oct'
messenger_loc = os.path.join(messenger_dir, messenger_exe)
shutil.move(messenger_exe, messenger_loc)
os.remove('messenger.o')
|
Add an octave build script#!/usr/bin/python
from __future__ import print_function
import os
import sys
import shutil
import subprocess
# Check the system platform first
platform = sys.platform
print("This is a " + platform + " system")
if platform.startswith('linux'):
messenger_dir = 'mexa64'
elif platform.startswith('darwin'):
messenger_dir = 'mexmaci64'
elif platform.startswith('win32'):
# We further need to differniate 32 from 64 bit:
maxint = sys.maxsize
if maxint == 9223372036854775807:
messenger_dir = 'mexw64'
elif maxint == 2147483647:
messenger_dir = 'mexw32'
with open(os.path.join(messenger_dir, 'local_octave.cfg')) as fid:
lines = fid.readlines()
cfg = {}
for line in lines:
name, path = line.split('=')
cfg[name.lower()] = path
print("Building messenger.oct...")
paths = "-L%(octave_lib)s -I%(octave_inc)s -L%(zmq_lib)s -I%(zmq_inc)s" % cfg
make_cmd = "mkoctfile %s -lzmq ./src/messenger.c" % paths
print(make_cmd)
subprocess.check_output(make_cmd.split())
messenger_exe = 'messenger.oct'
messenger_loc = os.path.join(messenger_dir, messenger_exe)
shutil.move(messenger_exe, messenger_loc)
os.remove('messenger.o')
|
<commit_before><commit_msg>Add an octave build script<commit_after>#!/usr/bin/python
from __future__ import print_function
import os
import sys
import shutil
import subprocess
# Check the system platform first
platform = sys.platform
print("This is a " + platform + " system")
if platform.startswith('linux'):
messenger_dir = 'mexa64'
elif platform.startswith('darwin'):
messenger_dir = 'mexmaci64'
elif platform.startswith('win32'):
# We further need to differniate 32 from 64 bit:
maxint = sys.maxsize
if maxint == 9223372036854775807:
messenger_dir = 'mexw64'
elif maxint == 2147483647:
messenger_dir = 'mexw32'
with open(os.path.join(messenger_dir, 'local_octave.cfg')) as fid:
lines = fid.readlines()
cfg = {}
for line in lines:
name, path = line.split('=')
cfg[name.lower()] = path
print("Building messenger.oct...")
paths = "-L%(octave_lib)s -I%(octave_inc)s -L%(zmq_lib)s -I%(zmq_inc)s" % cfg
make_cmd = "mkoctfile %s -lzmq ./src/messenger.c" % paths
print(make_cmd)
subprocess.check_output(make_cmd.split())
messenger_exe = 'messenger.oct'
messenger_loc = os.path.join(messenger_dir, messenger_exe)
shutil.move(messenger_exe, messenger_loc)
os.remove('messenger.o')
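For reference, the local_octave.cfg consumed above is just 'NAME=path' lines; the loader lower-cases the names and the command template expects octave_lib, octave_inc, zmq_lib and zmq_inc. The paths below are made up.
example_cfg_lines = [
    "OCTAVE_LIB=/usr/lib/octave/4.0.0\n",
    "OCTAVE_INC=/usr/include/octave-4.0.0/octave\n",
    "ZMQ_LIB=/usr/local/lib\n",
    "ZMQ_INC=/usr/local/include\n",
]
cfg = {}
for line in example_cfg_lines:
    name, path = line.split('=')
    cfg[name.lower()] = path.strip()
print("-L%(octave_lib)s -I%(octave_inc)s -L%(zmq_lib)s -I%(zmq_inc)s" % cfg)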
|
|
3ccd2e71b314ba63c3df9f43635c92d996b5fed8
|
libcloud/test/common/test_base_driver.py
|
libcloud/test/common/test_base_driver.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from mock import Mock
from libcloud.common.base import BaseDriver
from libcloud.test import unittest
class BaseDriverTestCase(unittest.TestCase):
def test_timeout_argument_propagation_and_preservation(self):
class DummyDriver1(BaseDriver):
pass
# 1. No timeout provided
DummyDriver1.connectionCls = Mock()
DummyDriver1(key='foo')
call_kwargs = DummyDriver1.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], None)
# 2. Timeout provided as constructor argument
class DummyDriver1(BaseDriver):
pass
DummyDriver1.connectionCls = Mock()
DummyDriver1(key='foo', timeout=12)
call_kwargs = DummyDriver1.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], 12)
# 3. timeout provided via "_ex_connection_class_kwargs" method
class DummyDriver2(BaseDriver):
def _ex_connection_class_kwargs(self):
result = {}
result['timeout'] = 13
return result
DummyDriver2.connectionCls = Mock()
DummyDriver2(key='foo')
call_kwargs = DummyDriver2.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], 13)
if __name__ == '__main__':
sys.exit(unittest.main())
|
Add failing regression test case for timeout argument preservation on the BaseDriver class.
|
Add failing regression test case for timeout argument preservation on the
BaseDriver class.
|
Python
|
apache-2.0
|
watermelo/libcloud,Scalr/libcloud,Scalr/libcloud,Kami/libcloud,lochiiconnectivity/libcloud,iPlantCollaborativeOpenSource/libcloud,ByteInternet/libcloud,Kami/libcloud,supertom/libcloud,t-tran/libcloud,DimensionDataCBUSydney/libcloud,mgogoulos/libcloud,illfelder/libcloud,StackPointCloud/libcloud,apache/libcloud,StackPointCloud/libcloud,niteoweb/libcloud,t-tran/libcloud,illfelder/libcloud,mistio/libcloud,mistio/libcloud,vongazman/libcloud,wuyuewen/libcloud,mathspace/libcloud,niteoweb/libcloud,samuelchong/libcloud,supertom/libcloud,t-tran/libcloud,watermelo/libcloud,apache/libcloud,lochiiconnectivity/libcloud,wuyuewen/libcloud,pquentin/libcloud,DimensionDataCBUSydney/libcloud,iPlantCollaborativeOpenSource/libcloud,niteoweb/libcloud,ZuluPro/libcloud,mgogoulos/libcloud,mgogoulos/libcloud,iPlantCollaborativeOpenSource/libcloud,techhat/libcloud,StackPointCloud/libcloud,NexusIS/libcloud,ByteInternet/libcloud,techhat/libcloud,SecurityCompass/libcloud,ZuluPro/libcloud,DimensionDataCBUSydney/libcloud,SecurityCompass/libcloud,Kami/libcloud,pquentin/libcloud,erjohnso/libcloud,samuelchong/libcloud,supertom/libcloud,SecurityCompass/libcloud,ZuluPro/libcloud,Scalr/libcloud,wrigri/libcloud,wrigri/libcloud,NexusIS/libcloud,erjohnso/libcloud,wuyuewen/libcloud,NexusIS/libcloud,andrewsomething/libcloud,mathspace/libcloud,wrigri/libcloud,apache/libcloud,andrewsomething/libcloud,erjohnso/libcloud,ByteInternet/libcloud,techhat/libcloud,mistio/libcloud,pquentin/libcloud,andrewsomething/libcloud,samuelchong/libcloud,lochiiconnectivity/libcloud,vongazman/libcloud,watermelo/libcloud,illfelder/libcloud,mathspace/libcloud,vongazman/libcloud
|
Add failing regression test case for timeout argument preservation on the
BaseDriver class.
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from mock import Mock
from libcloud.common.base import BaseDriver
from libcloud.test import unittest
class BaseDriverTestCase(unittest.TestCase):
def test_timeout_argument_propagation_and_preservation(self):
class DummyDriver1(BaseDriver):
pass
# 1. No timeout provided
DummyDriver1.connectionCls = Mock()
DummyDriver1(key='foo')
call_kwargs = DummyDriver1.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], None)
# 2. Timeout provided as constructor argument
class DummyDriver1(BaseDriver):
pass
DummyDriver1.connectionCls = Mock()
DummyDriver1(key='foo', timeout=12)
call_kwargs = DummyDriver1.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], 12)
# 3. timeout provided via "_ex_connection_class_kwargs" method
class DummyDriver2(BaseDriver):
def _ex_connection_class_kwargs(self):
result = {}
result['timeout'] = 13
return result
DummyDriver2.connectionCls = Mock()
DummyDriver2(key='foo')
call_kwargs = DummyDriver2.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], 13)
if __name__ == '__main__':
sys.exit(unittest.main())
|
<commit_before><commit_msg>Add failing regression test case for timeout argument preservation on the
BaseDriver class.<commit_after>
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from mock import Mock
from libcloud.common.base import BaseDriver
from libcloud.test import unittest
class BaseDriverTestCase(unittest.TestCase):
def test_timeout_argument_propagation_and_preservation(self):
class DummyDriver1(BaseDriver):
pass
# 1. No timeout provided
DummyDriver1.connectionCls = Mock()
DummyDriver1(key='foo')
call_kwargs = DummyDriver1.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], None)
# 2. Timeout provided as constructor argument
class DummyDriver1(BaseDriver):
pass
DummyDriver1.connectionCls = Mock()
DummyDriver1(key='foo', timeout=12)
call_kwargs = DummyDriver1.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], 12)
# 3. timeout provided via "_ex_connection_class_kwargs" method
class DummyDriver2(BaseDriver):
def _ex_connection_class_kwargs(self):
result = {}
result['timeout'] = 13
return result
DummyDriver2.connectionCls = Mock()
        DummyDriver2(key='foo')
        call_kwargs = DummyDriver2.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], 13)
if __name__ == '__main__':
sys.exit(unittest.main())
|
Add failing regression test case for timeout argument preservation on the
BaseDriver class.# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from mock import Mock
from libcloud.common.base import BaseDriver
from libcloud.test import unittest
class BaseDriverTestCase(unittest.TestCase):
def test_timeout_argument_propagation_and_preservation(self):
class DummyDriver1(BaseDriver):
pass
# 1. No timeout provided
DummyDriver1.connectionCls = Mock()
DummyDriver1(key='foo')
call_kwargs = DummyDriver1.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], None)
# 2. Timeout provided as constructor argument
class DummyDriver1(BaseDriver):
pass
DummyDriver1.connectionCls = Mock()
DummyDriver1(key='foo', timeout=12)
call_kwargs = DummyDriver1.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], 12)
# 3. timeout provided via "_ex_connection_class_kwargs" method
class DummyDriver2(BaseDriver):
def _ex_connection_class_kwargs(self):
result = {}
result['timeout'] = 13
return result
DummyDriver2.connectionCls = Mock()
        DummyDriver2(key='foo')
        call_kwargs = DummyDriver2.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], 13)
if __name__ == '__main__':
sys.exit(unittest.main())
|
<commit_before><commit_msg>Add failing regression test case for timeout argument preservation on the
BaseDriver class.<commit_after># Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from mock import Mock
from libcloud.common.base import BaseDriver
from libcloud.test import unittest
class BaseDriverTestCase(unittest.TestCase):
def test_timeout_argument_propagation_and_preservation(self):
class DummyDriver1(BaseDriver):
pass
# 1. No timeout provided
DummyDriver1.connectionCls = Mock()
DummyDriver1(key='foo')
call_kwargs = DummyDriver1.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], None)
# 2. Timeout provided as constructor argument
class DummyDriver1(BaseDriver):
pass
DummyDriver1.connectionCls = Mock()
DummyDriver1(key='foo', timeout=12)
call_kwargs = DummyDriver1.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], 12)
# 3. timeout provided via "_ex_connection_class_kwargs" method
class DummyDriver2(BaseDriver):
def _ex_connection_class_kwargs(self):
result = {}
result['timeout'] = 13
return result
DummyDriver2.connectionCls = Mock()
        DummyDriver2(key='foo')
        call_kwargs = DummyDriver2.connectionCls.call_args[1]
self.assertEqual(call_kwargs['timeout'], 13)
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
bf771afb7b6cdef86d6c310882299e31102425c9
|
examples/test_get_pdf_text.py
|
examples/test_get_pdf_text.py
|
from seleniumbase import BaseCase
class PdfTestClass(BaseCase):
def test_get_pdf_text(self):
pdf = ("https://nostarch.com/download/"
"Automate_the_Boring_Stuff_sample_ch17.pdf")
pdf_text = self.get_pdf_text(pdf, page=1)
print(pdf_text)
|
Add an example test for get_pdf_text(pdf, page)
|
Add an example test for get_pdf_text(pdf, page)
|
Python
|
mit
|
seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase
|
Add an example test for get_pdf_text(pdf, page)
|
from seleniumbase import BaseCase
class PdfTestClass(BaseCase):
def test_get_pdf_text(self):
pdf = ("https://nostarch.com/download/"
"Automate_the_Boring_Stuff_sample_ch17.pdf")
pdf_text = self.get_pdf_text(pdf, page=1)
print(pdf_text)
|
<commit_before><commit_msg>Add an example test for get_pdf_text(pdf, page)<commit_after>
|
from seleniumbase import BaseCase
class PdfTestClass(BaseCase):
def test_get_pdf_text(self):
pdf = ("https://nostarch.com/download/"
"Automate_the_Boring_Stuff_sample_ch17.pdf")
pdf_text = self.get_pdf_text(pdf, page=1)
print(pdf_text)
|
Add an example test for get_pdf_text(pdf, page)from seleniumbase import BaseCase
class PdfTestClass(BaseCase):
def test_get_pdf_text(self):
pdf = ("https://nostarch.com/download/"
"Automate_the_Boring_Stuff_sample_ch17.pdf")
pdf_text = self.get_pdf_text(pdf, page=1)
print(pdf_text)
|
<commit_before><commit_msg>Add an example test for get_pdf_text(pdf, page)<commit_after>from seleniumbase import BaseCase
class PdfTestClass(BaseCase):
def test_get_pdf_text(self):
pdf = ("https://nostarch.com/download/"
"Automate_the_Boring_Stuff_sample_ch17.pdf")
pdf_text = self.get_pdf_text(pdf, page=1)
print(pdf_text)
|
|
f9c40170e545e851a8cf6c0f861aa590f1a6078e
|
tests/unit/modules/inspect_fsdb_test.py
|
tests/unit/modules/inspect_fsdb_test.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
from salt.modules.inspectlib.fsdb import CsvDB
from StringIO import StringIO
ensure_in_syspath('../../')
def mock_open(data=None):
'''
Mock "open" function in a simple way.
:param data:
:return:
'''
data = StringIO(data)
mock = MagicMock(spec=file)
handle = MagicMock(spec=file)
handle.write.return_value = None
handle.__enter__.return_value = data or handle
mock.return_value = handle
return mock
@skipIf(NO_MOCK, NO_MOCK_REASON)
class InspectorFSDBTestCase(TestCase):
'''
Test case for the FSDB: FileSystem Database.
    FSDB is a very simple object-to-CSV storage with very inefficient
    update/delete operations (nice to have at some point) and efficient
    storing/reading of the objects (which is exactly what the functionality needs).
    The main advantage of FSDB is that it stores Python objects in plain CSV files
    and has a very small code base.
'''
@patch("os.makedirs", MagicMock())
@patch("os.listdir", MagicMock(return_value=['test_db']))
@patch("gzip.open", mock_open("foo:int,bar:str"))
def test_open(self):
'''
Test opening the database.
:return:
'''
csvdb = CsvDB('/foobar')
csvdb.open()
assert csvdb.list_tables() == ['test_db']
@patch("os.makedirs", MagicMock())
@patch("os.listdir", MagicMock(return_value=['test_db']))
def test_list_databases(self):
'''
        Test listing the available databases.
:return:
'''
csvdb = CsvDB('/foobar')
assert csvdb.list() == ['test_db']
|
Add initial tests for fsdb
|
Add initial tests for fsdb
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add initial tests for fsdb
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
from salt.modules.inspectlib.fsdb import CsvDB
from StringIO import StringIO
ensure_in_syspath('../../')
def mock_open(data=None):
'''
Mock "open" function in a simple way.
:param data:
:return:
'''
data = StringIO(data)
mock = MagicMock(spec=file)
handle = MagicMock(spec=file)
handle.write.return_value = None
handle.__enter__.return_value = data or handle
mock.return_value = handle
return mock
@skipIf(NO_MOCK, NO_MOCK_REASON)
class InspectorFSDBTestCase(TestCase):
'''
Test case for the FSDB: FileSystem Database.
    FSDB is a very simple object-to-CSV storage with very inefficient
    update/delete operations (nice to have at some point) and efficient
    storing/reading of the objects (which is exactly what the functionality needs).
    The main advantage of FSDB is that it stores Python objects in plain CSV files
    and has a very small code base.
'''
@patch("os.makedirs", MagicMock())
@patch("os.listdir", MagicMock(return_value=['test_db']))
@patch("gzip.open", mock_open("foo:int,bar:str"))
def test_open(self):
'''
Test opening the database.
:return:
'''
csvdb = CsvDB('/foobar')
csvdb.open()
assert csvdb.list_tables() == ['test_db']
@patch("os.makedirs", MagicMock())
@patch("os.listdir", MagicMock(return_value=['test_db']))
def test_list_databases(self):
'''
        Test listing the available databases.
:return:
'''
csvdb = CsvDB('/foobar')
assert csvdb.list() == ['test_db']
|
<commit_before><commit_msg>Add initial tests for fsdb<commit_after>
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
from salt.modules.inspectlib.fsdb import CsvDB
from StringIO import StringIO
ensure_in_syspath('../../')
def mock_open(data=None):
'''
Mock "open" function in a simple way.
:param data:
:return:
'''
data = StringIO(data)
mock = MagicMock(spec=file)
handle = MagicMock(spec=file)
handle.write.return_value = None
handle.__enter__.return_value = data or handle
mock.return_value = handle
return mock
@skipIf(NO_MOCK, NO_MOCK_REASON)
class InspectorFSDBTestCase(TestCase):
'''
Test case for the FSDB: FileSystem Database.
    FSDB is a very simple object-to-CSV storage with very inefficient
    update/delete operations (nice to have at some point) and efficient
    storing/reading of the objects (which is exactly what the functionality needs).
    The main advantage of FSDB is that it stores Python objects in plain CSV files
    and has a very small code base.
'''
@patch("os.makedirs", MagicMock())
@patch("os.listdir", MagicMock(return_value=['test_db']))
@patch("gzip.open", mock_open("foo:int,bar:str"))
def test_open(self):
'''
Test opening the database.
:return:
'''
csvdb = CsvDB('/foobar')
csvdb.open()
assert csvdb.list_tables() == ['test_db']
@patch("os.makedirs", MagicMock())
@patch("os.listdir", MagicMock(return_value=['test_db']))
def test_list_databases(self):
'''
        Test listing the available databases.
:return:
'''
csvdb = CsvDB('/foobar')
assert csvdb.list() == ['test_db']
|
Add initial tests for fsdb# -*- coding: utf-8 -*-
#
# Copyright 2016 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
from salt.modules.inspectlib.fsdb import CsvDB
from StringIO import StringIO
ensure_in_syspath('../../')
def mock_open(data=None):
'''
Mock "open" function in a simple way.
:param data:
:return:
'''
data = StringIO(data)
mock = MagicMock(spec=file)
handle = MagicMock(spec=file)
handle.write.return_value = None
handle.__enter__.return_value = data or handle
mock.return_value = handle
return mock
@skipIf(NO_MOCK, NO_MOCK_REASON)
class InspectorFSDBTestCase(TestCase):
'''
Test case for the FSDB: FileSystem Database.
    FSDB is a very simple object-to-CSV storage with very inefficient
    update/delete operations (nice to have at some point) and efficient
    storing/reading of the objects (which is exactly what the functionality needs).
    The main advantage of FSDB is that it stores Python objects in plain CSV files
    and has a very small code base.
'''
@patch("os.makedirs", MagicMock())
@patch("os.listdir", MagicMock(return_value=['test_db']))
@patch("gzip.open", mock_open("foo:int,bar:str"))
def test_open(self):
'''
Test opening the database.
:return:
'''
csvdb = CsvDB('/foobar')
csvdb.open()
assert csvdb.list_tables() == ['test_db']
@patch("os.makedirs", MagicMock())
@patch("os.listdir", MagicMock(return_value=['test_db']))
def test_list_databases(self):
'''
        Test listing the available databases.
:return:
'''
csvdb = CsvDB('/foobar')
assert csvdb.list() == ['test_db']
|
<commit_before><commit_msg>Add initial tests for fsdb<commit_after># -*- coding: utf-8 -*-
#
# Copyright 2016 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import Python Libs
from __future__ import absolute_import
import os
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
from salt.modules.inspectlib.fsdb import CsvDB
from StringIO import StringIO
ensure_in_syspath('../../')
def mock_open(data=None):
'''
Mock "open" function in a simple way.
:param data:
:return:
'''
data = StringIO(data)
mock = MagicMock(spec=file)
handle = MagicMock(spec=file)
handle.write.return_value = None
handle.__enter__.return_value = data or handle
mock.return_value = handle
return mock
@skipIf(NO_MOCK, NO_MOCK_REASON)
class InspectorFSDBTestCase(TestCase):
'''
Test case for the FSDB: FileSystem Database.
    FSDB is a very simple object-to-CSV storage with very inefficient
    update/delete operations (nice to have at some point) and efficient
    storing/reading of the objects (which is exactly what the functionality needs).
    The main advantage of FSDB is that it stores Python objects in plain CSV files
    and has a very small code base.
'''
@patch("os.makedirs", MagicMock())
@patch("os.listdir", MagicMock(return_value=['test_db']))
@patch("gzip.open", mock_open("foo:int,bar:str"))
def test_open(self):
'''
Test opening the database.
:return:
'''
csvdb = CsvDB('/foobar')
csvdb.open()
assert csvdb.list_tables() == ['test_db']
@patch("os.makedirs", MagicMock())
@patch("os.listdir", MagicMock(return_value=['test_db']))
def test_list_databases(self):
'''
        Test listing the available databases.
:return:
'''
csvdb = CsvDB('/foobar')
assert csvdb.list() == ['test_db']
|
|
8e065cb164b4ec1a9aafd7f448555707a875fab9
|
aggregator/espn_fc.py
|
aggregator/espn_fc.py
|
import datetime
from aggregator import exceptions
from aggregator.base import Aggregator, Article, InvalidArticle, make_soup
EXCLUDE_IF_IN_TITLE = ['LIVE:', 'WATCH:', 'LISTEN:']
class ESPNFC(Aggregator):
base_url = 'http://www.espnfc.com/?country-view=www&lang-view=en'
source = 'ESPN FC'
def extract(self):
soup = make_soup(ESPNFC.base_url)
divs = soup.find('div', {'alt': ' TOP STORIES '})
divs = iter(divs.find_all('div', {'class': 'grid-item-content'}))
articles = (self.crawl(div) for div in divs)
return list(article for article in articles if article is not None)
def crawl(self, tag):
try:
anchor = tag.find('a', {'class': 'common-link'})
url = self.get_url(anchor)
title = self.get_title(anchor)
if any(exclude in title for exclude in EXCLUDE_IF_IN_TITLE):
return None
date_published = self.get_date_published(tag)
author = self.get_author(tag)
return Article(ESPNFC.source, title, url, author, date_published)
except exceptions.WebCrawlException as e:
return InvalidArticle(ESPNFC.source, e)
def get_author(self, tag):
try:
author = tag.find('span', {'class': 'author byline'})
return author.text.strip()
except AttributeError as e:
raise exceptions.AuthorNotFoundException
def get_date_published(self, tag):
try:
date_published = tag.find('time')['datetime']
date_published = date_published.split('T')[0]
date_published = datetime.datetime.strptime(date_published,
'%Y-%m-%d').date()
return date_published
except (IndexError, AttributeError, ValueError, TypeError):
raise exceptions.DatePublishedNotFoundException
def get_title(self, tag):
try:
return tag.text.strip()
except AttributeError as e:
raise exceptions.TitleNotFoundException
def get_url(self, tag):
try:
url = tag['href']
url = url.replace('.us', '.com')
return url
except (KeyError, AttributeError, TypeError):
raise exceptions.UrlNotFoundException
if __name__ == '__main__':
espn_fc = ESPNFC()
print(espn_fc.extract())
|
Implement web scraping functionality for ESPN FC
|
Implement web scraping functionality for ESPN FC
|
Python
|
apache-2.0
|
footynews/fn_backend
|
Implement web scraping functionality for ESPN FC
|
import datetime
from aggregator import exceptions
from aggregator.base import Aggregator, Article, InvalidArticle, make_soup
EXCLUDE_IF_IN_TITLE = ['LIVE:', 'WATCH:', 'LISTEN:']
class ESPNFC(Aggregator):
base_url = 'http://www.espnfc.com/?country-view=www&lang-view=en'
source = 'ESPN FC'
def extract(self):
soup = make_soup(ESPNFC.base_url)
divs = soup.find('div', {'alt': ' TOP STORIES '})
divs = iter(divs.find_all('div', {'class': 'grid-item-content'}))
articles = (self.crawl(div) for div in divs)
return list(article for article in articles if article is not None)
def crawl(self, tag):
try:
anchor = tag.find('a', {'class': 'common-link'})
url = self.get_url(anchor)
title = self.get_title(anchor)
if any(exclude in title for exclude in EXCLUDE_IF_IN_TITLE):
return None
date_published = self.get_date_published(tag)
author = self.get_author(tag)
return Article(ESPNFC.source, title, url, author, date_published)
except exceptions.WebCrawlException as e:
return InvalidArticle(ESPNFC.source, e)
def get_author(self, tag):
try:
author = tag.find('span', {'class': 'author byline'})
return author.text.strip()
except AttributeError as e:
raise exceptions.AuthorNotFoundException
def get_date_published(self, tag):
try:
date_published = tag.find('time')['datetime']
date_published = date_published.split('T')[0]
date_published = datetime.datetime.strptime(date_published,
'%Y-%m-%d').date()
return date_published
except (IndexError, AttributeError, ValueError, TypeError):
raise exceptions.DatePublishedNotFoundException
def get_title(self, tag):
try:
return tag.text.strip()
except AttributeError as e:
raise exceptions.TitleNotFoundException
def get_url(self, tag):
try:
url = tag['href']
url = url.replace('.us', '.com')
return url
except (KeyError, AttributeError, TypeError):
raise exceptions.UrlNotFoundException
if __name__ == '__main__':
espn_fc = ESPNFC()
print(espn_fc.extract())
|
<commit_before><commit_msg>Implement web scraping functionality for ESPN FC<commit_after>
|
import datetime
from aggregator import exceptions
from aggregator.base import Aggregator, Article, InvalidArticle, make_soup
EXCLUDE_IF_IN_TITLE = ['LIVE:', 'WATCH:', 'LISTEN:']
class ESPNFC(Aggregator):
base_url = 'http://www.espnfc.com/?country-view=www&lang-view=en'
source = 'ESPN FC'
def extract(self):
soup = make_soup(ESPNFC.base_url)
divs = soup.find('div', {'alt': ' TOP STORIES '})
divs = iter(divs.find_all('div', {'class': 'grid-item-content'}))
articles = (self.crawl(div) for div in divs)
return list(article for article in articles if article is not None)
def crawl(self, tag):
try:
anchor = tag.find('a', {'class': 'common-link'})
url = self.get_url(anchor)
title = self.get_title(anchor)
if any(exclude in title for exclude in EXCLUDE_IF_IN_TITLE):
return None
date_published = self.get_date_published(tag)
author = self.get_author(tag)
return Article(ESPNFC.source, title, url, author, date_published)
except exceptions.WebCrawlException as e:
return InvalidArticle(ESPNFC.source, e)
def get_author(self, tag):
try:
author = tag.find('span', {'class': 'author byline'})
return author.text.strip()
except AttributeError as e:
raise exceptions.AuthorNotFoundException
def get_date_published(self, tag):
try:
date_published = tag.find('time')['datetime']
date_published = date_published.split('T')[0]
date_published = datetime.datetime.strptime(date_published,
'%Y-%m-%d').date()
return date_published
except (IndexError, AttributeError, ValueError, TypeError):
raise exceptions.DatePublishedNotFoundException
def get_title(self, tag):
try:
return tag.text.strip()
except AttributeError as e:
raise exceptions.TitleNotFoundException
def get_url(self, tag):
try:
url = tag['href']
url = url.replace('.us', '.com')
return url
except (KeyError, AttributeError, TypeError):
raise exceptions.UrlNotFoundException
if __name__ == '__main__':
espn_fc = ESPNFC()
print(espn_fc.extract())
|
Implement web scraping functionality for ESPN FCimport datetime
from aggregator import exceptions
from aggregator.base import Aggregator, Article, InvalidArticle, make_soup
EXCLUDE_IF_IN_TITLE = ['LIVE:', 'WATCH:', 'LISTEN:']
class ESPNFC(Aggregator):
base_url = 'http://www.espnfc.com/?country-view=www&lang-view=en'
source = 'ESPN FC'
def extract(self):
soup = make_soup(ESPNFC.base_url)
divs = soup.find('div', {'alt': ' TOP STORIES '})
divs = iter(divs.find_all('div', {'class': 'grid-item-content'}))
articles = (self.crawl(div) for div in divs)
return list(article for article in articles if article is not None)
def crawl(self, tag):
try:
anchor = tag.find('a', {'class': 'common-link'})
url = self.get_url(anchor)
title = self.get_title(anchor)
if any(exclude in title for exclude in EXCLUDE_IF_IN_TITLE):
return None
date_published = self.get_date_published(tag)
author = self.get_author(tag)
return Article(ESPNFC.source, title, url, author, date_published)
except exceptions.WebCrawlException as e:
return InvalidArticle(ESPNFC.source, e)
def get_author(self, tag):
try:
author = tag.find('span', {'class': 'author byline'})
return author.text.strip()
except AttributeError as e:
raise exceptions.AuthorNotFoundException
def get_date_published(self, tag):
try:
date_published = tag.find('time')['datetime']
date_published = date_published.split('T')[0]
date_published = datetime.datetime.strptime(date_published,
'%Y-%m-%d').date()
return date_published
except (IndexError, AttributeError, ValueError, TypeError):
raise exceptions.DatePublishedNotFoundException
def get_title(self, tag):
try:
return tag.text.strip()
except AttributeError as e:
raise exceptions.TitleNotFoundException
def get_url(self, tag):
try:
url = tag['href']
url = url.replace('.us', '.com')
return url
except (KeyError, AttributeError, TypeError):
raise exceptions.UrlNotFoundException
if __name__ == '__main__':
espn_fc = ESPNFC()
print(espn_fc.extract())
|
<commit_before><commit_msg>Implement web scraping functionality for ESPN FC<commit_after>import datetime
from aggregator import exceptions
from aggregator.base import Aggregator, Article, InvalidArticle, make_soup
EXCLUDE_IF_IN_TITLE = ['LIVE:', 'WATCH:', 'LISTEN:']
class ESPNFC(Aggregator):
base_url = 'http://www.espnfc.com/?country-view=www&lang-view=en'
source = 'ESPN FC'
def extract(self):
soup = make_soup(ESPNFC.base_url)
divs = soup.find('div', {'alt': ' TOP STORIES '})
divs = iter(divs.find_all('div', {'class': 'grid-item-content'}))
articles = (self.crawl(div) for div in divs)
return list(article for article in articles if article is not None)
def crawl(self, tag):
try:
anchor = tag.find('a', {'class': 'common-link'})
url = self.get_url(anchor)
title = self.get_title(anchor)
if any(exclude in title for exclude in EXCLUDE_IF_IN_TITLE):
return None
date_published = self.get_date_published(tag)
author = self.get_author(tag)
return Article(ESPNFC.source, title, url, author, date_published)
except exceptions.WebCrawlException as e:
return InvalidArticle(ESPNFC.source, e)
def get_author(self, tag):
try:
author = tag.find('span', {'class': 'author byline'})
return author.text.strip()
except AttributeError as e:
raise exceptions.AuthorNotFoundException
def get_date_published(self, tag):
try:
date_published = tag.find('time')['datetime']
date_published = date_published.split('T')[0]
date_published = datetime.datetime.strptime(date_published,
'%Y-%m-%d').date()
return date_published
except (IndexError, AttributeError, ValueError, TypeError):
raise exceptions.DatePublishedNotFoundException
def get_title(self, tag):
try:
return tag.text.strip()
except AttributeError as e:
raise exceptions.TitleNotFoundException
def get_url(self, tag):
try:
url = tag['href']
url = url.replace('.us', '.com')
return url
except (KeyError, AttributeError, TypeError):
raise exceptions.UrlNotFoundException
if __name__ == '__main__':
espn_fc = ESPNFC()
print(espn_fc.extract())
|
|
1ec2cac94d7f98d651df32b21971039bddfbb996
|
tempest/tests/lib/services/volume/v2/test_extensions_client.py
|
tempest/tests/lib/services/volume/v2/test_extensions_client.py
|
# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import extensions_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestExtensionsClient(base.BaseServiceTest):
FAKE_EXTENSION_LIST = {
"extensions": [
{
"updated": "2012-03-12T00:00:00+00:00",
"name": "QuotaClasses",
"links": [],
"namespace": "fake-namespace-1",
"alias": "os-quota-class-sets",
"description": "Quota classes management support."
},
{
"updated": "2013-05-29T00:00:00+00:00",
"name": "VolumeTransfer",
"links": [],
"namespace": "fake-namespace-2",
"alias": "os-volume-transfer",
"description": "Volume transfer management support."
},
{
"updated": "2014-02-10T00:00:00+00:00",
"name": "VolumeManage",
"links": [],
"namespace": "fake-namespace-3",
"alias": "os-volume-manage",
"description": "Manage existing backend storage by Cinder."
}
]
}
def setUp(self):
super(TestExtensionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = extensions_client.ExtensionsClient(fake_auth,
'volume',
'regionOne')
def _test_list_extensions(self, bytes_body=False):
self.check_service_client_function(
self.client.list_extensions,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_EXTENSION_LIST,
bytes_body)
def test_list_extensions_with_str_body(self):
self._test_list_extensions()
def test_list_extensions_with_bytes_body(self):
self._test_list_extensions(bytes_body=True)
|
Add unit test for volume extensions client
|
Add unit test for volume extensions client
This patch adds unit test for volume v2 extensions client.
Partially Implements: blueprint tempest-lib-missing-test-coverage
Change-Id: I5a614fb92f44f5960610dc39299c6effb979be9a
|
Python
|
apache-2.0
|
masayukig/tempest,openstack/tempest,cisco-openstack/tempest,openstack/tempest,cisco-openstack/tempest,vedujoshi/tempest,Juniper/tempest,Juniper/tempest,vedujoshi/tempest,masayukig/tempest
|
Add unit test for volume extensions client
This patch adds unit test for volume v2 extensions client.
Partially Implements: blueprint tempest-lib-missing-test-coverage
Change-Id: I5a614fb92f44f5960610dc39299c6effb979be9a
|
# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import extensions_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestExtensionsClient(base.BaseServiceTest):
FAKE_EXTENSION_LIST = {
"extensions": [
{
"updated": "2012-03-12T00:00:00+00:00",
"name": "QuotaClasses",
"links": [],
"namespace": "fake-namespace-1",
"alias": "os-quota-class-sets",
"description": "Quota classes management support."
},
{
"updated": "2013-05-29T00:00:00+00:00",
"name": "VolumeTransfer",
"links": [],
"namespace": "fake-namespace-2",
"alias": "os-volume-transfer",
"description": "Volume transfer management support."
},
{
"updated": "2014-02-10T00:00:00+00:00",
"name": "VolumeManage",
"links": [],
"namespace": "fake-namespace-3",
"alias": "os-volume-manage",
"description": "Manage existing backend storage by Cinder."
}
]
}
def setUp(self):
super(TestExtensionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = extensions_client.ExtensionsClient(fake_auth,
'volume',
'regionOne')
def _test_list_extensions(self, bytes_body=False):
self.check_service_client_function(
self.client.list_extensions,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_EXTENSION_LIST,
bytes_body)
def test_list_extensions_with_str_body(self):
self._test_list_extensions()
def test_list_extensions_with_bytes_body(self):
self._test_list_extensions(bytes_body=True)
|
<commit_before><commit_msg>Add unit test for volume extensions client
This patch adds unit test for volume v2 extensions client.
Partially Implements: blueprint tempest-lib-missing-test-coverage
Change-Id: I5a614fb92f44f5960610dc39299c6effb979be9a<commit_after>
|
# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import extensions_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestExtensionsClient(base.BaseServiceTest):
FAKE_EXTENSION_LIST = {
"extensions": [
{
"updated": "2012-03-12T00:00:00+00:00",
"name": "QuotaClasses",
"links": [],
"namespace": "fake-namespace-1",
"alias": "os-quota-class-sets",
"description": "Quota classes management support."
},
{
"updated": "2013-05-29T00:00:00+00:00",
"name": "VolumeTransfer",
"links": [],
"namespace": "fake-namespace-2",
"alias": "os-volume-transfer",
"description": "Volume transfer management support."
},
{
"updated": "2014-02-10T00:00:00+00:00",
"name": "VolumeManage",
"links": [],
"namespace": "fake-namespace-3",
"alias": "os-volume-manage",
"description": "Manage existing backend storage by Cinder."
}
]
}
def setUp(self):
super(TestExtensionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = extensions_client.ExtensionsClient(fake_auth,
'volume',
'regionOne')
def _test_list_extensions(self, bytes_body=False):
self.check_service_client_function(
self.client.list_extensions,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_EXTENSION_LIST,
bytes_body)
def test_list_extensions_with_str_body(self):
self._test_list_extensions()
def test_list_extensions_with_bytes_body(self):
self._test_list_extensions(bytes_body=True)
|
Add unit test for volume extensions client
This patch adds unit test for volume v2 extensions client.
Partially Implements: blueprint tempest-lib-missing-test-coverage
Change-Id: I5a614fb92f44f5960610dc39299c6effb979be9a# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import extensions_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestExtensionsClient(base.BaseServiceTest):
FAKE_EXTENSION_LIST = {
"extensions": [
{
"updated": "2012-03-12T00:00:00+00:00",
"name": "QuotaClasses",
"links": [],
"namespace": "fake-namespace-1",
"alias": "os-quota-class-sets",
"description": "Quota classes management support."
},
{
"updated": "2013-05-29T00:00:00+00:00",
"name": "VolumeTransfer",
"links": [],
"namespace": "fake-namespace-2",
"alias": "os-volume-transfer",
"description": "Volume transfer management support."
},
{
"updated": "2014-02-10T00:00:00+00:00",
"name": "VolumeManage",
"links": [],
"namespace": "fake-namespace-3",
"alias": "os-volume-manage",
"description": "Manage existing backend storage by Cinder."
}
]
}
def setUp(self):
super(TestExtensionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = extensions_client.ExtensionsClient(fake_auth,
'volume',
'regionOne')
def _test_list_extensions(self, bytes_body=False):
self.check_service_client_function(
self.client.list_extensions,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_EXTENSION_LIST,
bytes_body)
def test_list_extensions_with_str_body(self):
self._test_list_extensions()
def test_list_extensions_with_bytes_body(self):
self._test_list_extensions(bytes_body=True)
|
<commit_before><commit_msg>Add unit test for volume extensions client
This patch adds unit test for volume v2 extensions client.
Partially Implements: blueprint tempest-lib-missing-test-coverage
Change-Id: I5a614fb92f44f5960610dc39299c6effb979be9a<commit_after># Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import extensions_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestExtensionsClient(base.BaseServiceTest):
FAKE_EXTENSION_LIST = {
"extensions": [
{
"updated": "2012-03-12T00:00:00+00:00",
"name": "QuotaClasses",
"links": [],
"namespace": "fake-namespace-1",
"alias": "os-quota-class-sets",
"description": "Quota classes management support."
},
{
"updated": "2013-05-29T00:00:00+00:00",
"name": "VolumeTransfer",
"links": [],
"namespace": "fake-namespace-2",
"alias": "os-volume-transfer",
"description": "Volume transfer management support."
},
{
"updated": "2014-02-10T00:00:00+00:00",
"name": "VolumeManage",
"links": [],
"namespace": "fake-namespace-3",
"alias": "os-volume-manage",
"description": "Manage existing backend storage by Cinder."
}
]
}
def setUp(self):
super(TestExtensionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = extensions_client.ExtensionsClient(fake_auth,
'volume',
'regionOne')
def _test_list_extensions(self, bytes_body=False):
self.check_service_client_function(
self.client.list_extensions,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_EXTENSION_LIST,
bytes_body)
def test_list_extensions_with_str_body(self):
self._test_list_extensions()
def test_list_extensions_with_bytes_body(self):
self._test_list_extensions(bytes_body=True)
|
|
9ef706517a0ee3b460881d99d667fa5ba2829a12
|
admin/mailu/dockercli.py
|
admin/mailu/dockercli.py
|
from mailu import app
import docker
import signal
# Connect to the Docker socket
cli = docker.Client(base_url=app.config['DOCKER_SOCKET'])
def get(*names):
result = {}
all_containers = cli.containers(all=True)
for brief in all_containers:
if brief['Image'].startswith('mailu/'):
container = cli.inspect_container(brief['Id'])
container['Image'] = cli.inspect_image(container['Image'])
name = container['Config']['Labels']['com.docker.compose.service']
if not names or name in names:
result[name] = container
return result
def reload(*names):
for name, container in get(*names).items():
cli.kill(container["Id"], signal.SIGHUP.value)
|
Add the Docker client helper
|
Add the Docker client helper
|
Python
|
mit
|
kaiyou/freeposte.io,kaiyou/freeposte.io,kaiyou/freeposte.io,kaiyou/freeposte.io
|
Add the Docker client helper
|
from mailu import app
import docker
import signal
# Connect to the Docker socket
cli = docker.Client(base_url=app.config['DOCKER_SOCKET'])
def get(*names):
result = {}
all_containers = cli.containers(all=True)
for brief in all_containers:
if brief['Image'].startswith('mailu/'):
container = cli.inspect_container(brief['Id'])
container['Image'] = cli.inspect_image(container['Image'])
name = container['Config']['Labels']['com.docker.compose.service']
if not names or name in names:
result[name] = container
return result
def reload(*names):
for name, container in get(*names).items():
cli.kill(container["Id"], signal.SIGHUP.value)
|
<commit_before><commit_msg>Add the Docker client helper<commit_after>
|
from mailu import app
import docker
import signal
# Connect to the Docker socket
cli = docker.Client(base_url=app.config['DOCKER_SOCKET'])
def get(*names):
result = {}
all_containers = cli.containers(all=True)
for brief in all_containers:
if brief['Image'].startswith('mailu/'):
container = cli.inspect_container(brief['Id'])
container['Image'] = cli.inspect_image(container['Image'])
name = container['Config']['Labels']['com.docker.compose.service']
if not names or name in names:
result[name] = container
return result
def reload(*names):
for name, container in get(*names).items():
cli.kill(container["Id"], signal.SIGHUP.value)
|
Add the Docker client helperfrom mailu import app
import docker
import signal
# Connect to the Docker socket
cli = docker.Client(base_url=app.config['DOCKER_SOCKET'])
def get(*names):
result = {}
all_containers = cli.containers(all=True)
for brief in all_containers:
if brief['Image'].startswith('mailu/'):
container = cli.inspect_container(brief['Id'])
container['Image'] = cli.inspect_image(container['Image'])
name = container['Config']['Labels']['com.docker.compose.service']
if not names or name in names:
result[name] = container
return result
def reload(*names):
for name, container in get(*names).items():
cli.kill(container["Id"], signal.SIGHUP.value)
|
<commit_before><commit_msg>Add the Docker client helper<commit_after>from mailu import app
import docker
import signal
# Connect to the Docker socket
cli = docker.Client(base_url=app.config['DOCKER_SOCKET'])
def get(*names):
result = {}
all_containers = cli.containers(all=True)
for brief in all_containers:
if brief['Image'].startswith('mailu/'):
container = cli.inspect_container(brief['Id'])
container['Image'] = cli.inspect_image(container['Image'])
name = container['Config']['Labels']['com.docker.compose.service']
if not names or name in names:
result[name] = container
return result
def reload(*names):
for name, container in get(*names).items():
cli.kill(container["Id"], signal.SIGHUP.value)
|
|
d9c59ed8556c02d612eeb55fdfc3fc6ceef8844c
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
def get_long_description():
with open('./README.rst', 'r') as readme:
return readme.read()
setup(
name='django-email-tracker',
version='0.2',
description='Email Tracker for Django',
author='Venelina Yanakieva',
author_email='vili@magicsolutions.bg',
url='https://github.com/MagicSolutions/django-email-tracker',
long_description=get_long_description(),
packages=find_packages(),
zip_safe=False,
install_requires=['django>=1.4'],
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
|
from setuptools import setup, find_packages
def get_long_description():
with open('./README.rst', 'r') as readme:
return readme.read()
setup(
name='django-email-tracker',
version='0.2',
description='Email Tracker for Django',
author='Venelina Yanakieva',
author_email='vili@magicsolutions.bg',
maintainer='Venelin Stoykov',
maintainer_email='venelin@magicsolutions.bg',
url='https://github.com/MagicSolutions/django-email-tracker',
download_url='https://github.com/MagicSolutions/django-email-tracker/releases',
long_description=get_long_description(),
packages=find_packages(),
zip_safe=False,
install_requires=['django>=1.4'],
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
|
Add maintainer and download URL
|
Add maintainer and download URL
|
Python
|
mit
|
MagicSolutions/django-email-tracker,IndustriaTech/django-email-tracker
|
from setuptools import setup, find_packages
def get_long_description():
with open('./README.rst', 'r') as readme:
return readme.read()
setup(
name='django-email-tracker',
version='0.2',
description='Email Tracker for Django',
author='Venelina Yanakieva',
author_email='vili@magicsolutions.bg',
url='https://github.com/MagicSolutions/django-email-tracker',
long_description=get_long_description(),
packages=find_packages(),
zip_safe=False,
install_requires=['django>=1.4'],
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
Add maintainer and download URL
|
from setuptools import setup, find_packages
def get_long_description():
with open('./README.rst', 'r') as readme:
return readme.read()
setup(
name='django-email-tracker',
version='0.2',
description='Email Tracker for Django',
author='Venelina Yanakieva',
author_email='vili@magicsolutions.bg',
maintainer='Venelin Stoykov',
maintainer_email='venelin@magicsolutions.bg',
url='https://github.com/MagicSolutions/django-email-tracker',
download_url='https://github.com/MagicSolutions/django-email-tracker/releases',
long_description=get_long_description(),
packages=find_packages(),
zip_safe=False,
install_requires=['django>=1.4'],
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
|
<commit_before>from setuptools import setup, find_packages
def get_long_description():
with open('./README.rst', 'r') as readme:
return readme.read()
setup(
name='django-email-tracker',
version='0.2',
description='Email Tracker for Django',
author='Venelina Yanakieva',
author_email='vili@magicsolutions.bg',
url='https://github.com/MagicSolutions/django-email-tracker',
long_description=get_long_description(),
packages=find_packages(),
zip_safe=False,
install_requires=['django>=1.4'],
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
<commit_msg>Add maintainer and download URL<commit_after>
|
from setuptools import setup, find_packages
def get_long_description():
with open('./README.rst', 'r') as readme:
return readme.read()
setup(
name='django-email-tracker',
version='0.2',
description='Email Tracker for Django',
author='Venelina Yanakieva',
author_email='vili@magicsolutions.bg',
maintainer='Venelin Stoykov',
maintainer_email='venelin@magicsolutions.bg',
url='https://github.com/MagicSolutions/django-email-tracker',
download_url='https://github.com/MagicSolutions/django-email-tracker/releases',
long_description=get_long_description(),
packages=find_packages(),
zip_safe=False,
install_requires=['django>=1.4'],
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
|
from setuptools import setup, find_packages
def get_long_description():
with open('./README.rst', 'r') as readme:
return readme.read()
setup(
name='django-email-tracker',
version='0.2',
description='Email Tracker for Django',
author='Venelina Yanakieva',
author_email='vili@magicsolutions.bg',
url='https://github.com/MagicSolutions/django-email-tracker',
long_description=get_long_description(),
packages=find_packages(),
zip_safe=False,
install_requires=['django>=1.4'],
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
Add maintainer and download URLfrom setuptools import setup, find_packages
def get_long_description():
with open('./README.rst', 'r') as readme:
return readme.read()
setup(
name='django-email-tracker',
version='0.2',
description='Email Tracker for Django',
author='Venelina Yanakieva',
author_email='vili@magicsolutions.bg',
maintainer='Venelin Stoykov',
maintainer_email='venelin@magicsolutions.bg',
url='https://github.com/MagicSolutions/django-email-tracker',
download_url='https://github.com/MagicSolutions/django-email-tracker/releases',
long_description=get_long_description(),
packages=find_packages(),
zip_safe=False,
install_requires=['django>=1.4'],
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
|
<commit_before>from setuptools import setup, find_packages
def get_long_description():
with open('./README.rst', 'r') as readme:
return readme.read()
setup(
name='django-email-tracker',
version='0.2',
description='Email Tracker for Django',
author='Venelina Yanakieva',
author_email='vili@magicsolutions.bg',
url='https://github.com/MagicSolutions/django-email-tracker',
long_description=get_long_description(),
packages=find_packages(),
zip_safe=False,
install_requires=['django>=1.4'],
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
<commit_msg>Add maintainer and download URL<commit_after>from setuptools import setup, find_packages
def get_long_description():
with open('./README.rst', 'r') as readme:
return readme.read()
setup(
name='django-email-tracker',
version='0.2',
description='Email Tracker for Django',
author='Venelina Yanakieva',
author_email='vili@magicsolutions.bg',
maintainer='Venelin Stoykov',
maintainer_email='venelin@magicsolutions.bg',
url='https://github.com/MagicSolutions/django-email-tracker',
download_url='https://github.com/MagicSolutions/django-email-tracker/releases',
long_description=get_long_description(),
packages=find_packages(),
zip_safe=False,
install_requires=['django>=1.4'],
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
]
)
|
1ef6ec49377eee80c33f37ca7db5133404df9ce6
|
setup.py
|
setup.py
|
from setuptools import setup
setup(
name='PyHunter',
packages=['PyHunter'],
version='0.2',
description='An (unofficial) Python wrapper for the Hunter.io API',
author='Quentin Durantay',
author_email='quentin.durantay@gmail.com',
url='https://github.com/VonStruddle/PyHunter',
download_url='https://github.com/VonStruddle/PyHunter/archive/0.1.tar.gz',
install_requires=['requests'],
keywords=['hunter', 'hunter.io', 'lead generation', 'lead enrichment'],
classifiers=[],
)
|
from setuptools import setup
setup(
name='pyhunter',
packages=['pyhunter'],
version='0.2',
description='An (unofficial) Python wrapper for the Hunter.io API',
author='Quentin Durantay',
author_email='quentin.durantay@gmail.com',
url='https://github.com/VonStruddle/PyHunter',
download_url='https://github.com/VonStruddle/PyHunter/archive/0.1.tar.gz',
install_requires=['requests'],
keywords=['hunter', 'hunter.io', 'lead generation', 'lead enrichment'],
classifiers=[],
)
|
Use lowercase name for library
|
Use lowercase name for library
|
Python
|
mit
|
VonStruddle/PyHunter
|
from setuptools import setup
setup(
name='PyHunter',
packages=['PyHunter'],
version='0.2',
description='An (unofficial) Python wrapper for the Hunter.io API',
author='Quentin Durantay',
author_email='quentin.durantay@gmail.com',
url='https://github.com/VonStruddle/PyHunter',
download_url='https://github.com/VonStruddle/PyHunter/archive/0.1.tar.gz',
install_requires=['requests'],
keywords=['hunter', 'hunter.io', 'lead generation', 'lead enrichment'],
classifiers=[],
)
Use lowercase name for library
|
from setuptools import setup
setup(
name='pyhunter',
packages=['pyhunter'],
version='0.2',
description='An (unofficial) Python wrapper for the Hunter.io API',
author='Quentin Durantay',
author_email='quentin.durantay@gmail.com',
url='https://github.com/VonStruddle/PyHunter',
download_url='https://github.com/VonStruddle/PyHunter/archive/0.1.tar.gz',
install_requires=['requests'],
keywords=['hunter', 'hunter.io', 'lead generation', 'lead enrichment'],
classifiers=[],
)
|
<commit_before>from setuptools import setup
setup(
name='PyHunter',
packages=['PyHunter'],
version='0.2',
description='An (unofficial) Python wrapper for the Hunter.io API',
author='Quentin Durantay',
author_email='quentin.durantay@gmail.com',
url='https://github.com/VonStruddle/PyHunter',
download_url='https://github.com/VonStruddle/PyHunter/archive/0.1.tar.gz',
install_requires=['requests'],
keywords=['hunter', 'hunter.io', 'lead generation', 'lead enrichment'],
classifiers=[],
)
<commit_msg>Use lowercase name for library<commit_after>
|
from setuptools import setup
setup(
name='pyhunter',
packages=['pyhunter'],
version='0.2',
description='An (unofficial) Python wrapper for the Hunter.io API',
author='Quentin Durantay',
author_email='quentin.durantay@gmail.com',
url='https://github.com/VonStruddle/PyHunter',
download_url='https://github.com/VonStruddle/PyHunter/archive/0.1.tar.gz',
install_requires=['requests'],
keywords=['hunter', 'hunter.io', 'lead generation', 'lead enrichment'],
classifiers=[],
)
|
from setuptools import setup
setup(
name='PyHunter',
packages=['PyHunter'],
version='0.2',
description='An (unofficial) Python wrapper for the Hunter.io API',
author='Quentin Durantay',
author_email='quentin.durantay@gmail.com',
url='https://github.com/VonStruddle/PyHunter',
download_url='https://github.com/VonStruddle/PyHunter/archive/0.1.tar.gz',
install_requires=['requests'],
keywords=['hunter', 'hunter.io', 'lead generation', 'lead enrichment'],
classifiers=[],
)
Use lowercase name for libraryfrom setuptools import setup
setup(
name='pyhunter',
packages=['pyhunter'],
version='0.2',
description='An (unofficial) Python wrapper for the Hunter.io API',
author='Quentin Durantay',
author_email='quentin.durantay@gmail.com',
url='https://github.com/VonStruddle/PyHunter',
download_url='https://github.com/VonStruddle/PyHunter/archive/0.1.tar.gz',
install_requires=['requests'],
keywords=['hunter', 'hunter.io', 'lead generation', 'lead enrichment'],
classifiers=[],
)
|
<commit_before>from setuptools import setup
setup(
name='PyHunter',
packages=['PyHunter'],
version='0.2',
description='An (unofficial) Python wrapper for the Hunter.io API',
author='Quentin Durantay',
author_email='quentin.durantay@gmail.com',
url='https://github.com/VonStruddle/PyHunter',
download_url='https://github.com/VonStruddle/PyHunter/archive/0.1.tar.gz',
install_requires=['requests'],
keywords=['hunter', 'hunter.io', 'lead generation', 'lead enrichment'],
classifiers=[],
)
<commit_msg>Use lowercase name for library<commit_after>from setuptools import setup
setup(
name='pyhunter',
packages=['pyhunter'],
version='0.2',
description='An (unofficial) Python wrapper for the Hunter.io API',
author='Quentin Durantay',
author_email='quentin.durantay@gmail.com',
url='https://github.com/VonStruddle/PyHunter',
download_url='https://github.com/VonStruddle/PyHunter/archive/0.1.tar.gz',
install_requires=['requests'],
keywords=['hunter', 'hunter.io', 'lead generation', 'lead enrichment'],
classifiers=[],
)
|
be5ccf56a43732ca799ebd9197af98f0175f0b7f
|
CodeFights/floatRange.py
|
CodeFights/floatRange.py
|
#!/usr/local/bin/python
# Code Fights Float Range Problem
from itertools import count, takewhile
def floatRange(start, stop, step):
gen = takewhile(lambda x: x < stop, count(start, step))
return list(gen)
def main():
tests = [
[-0.9, 0.45, 0.2, [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3]],
[1.5, 1.5, 10, []],
[1, 2, 1.5, [1]],
[-21.11, 21.11, 1.11,
[-21.11, -20, -18.89, -17.78, -16.67, -15.56, -14.45, -13.34,
-12.23, -11.12, -10.01, -8.9, -7.79, -6.68, -5.57, -4.46, -3.35,
-2.24, -1.13, -0.02, 1.09, 2.2, 3.31, 4.42, 5.53, 6.64, 7.75,
8.86, 9.97, 11.08, 12.19, 13.3, 14.41, 15.52, 16.63, 17.74, 18.85,
19.96, 21.07]],
[0, 1, 0.5,
[0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]]
]
for t in tests:
res = floatRange(t[0], t[1], t[2])
if t[3] == res:
print("PASSED: floatRange({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: floatRange({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, t[3]))
if __name__ == '__main__':
main()
|
Solve Code Fights float range problem
|
Solve Code Fights float range problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights float range problem
|
#!/usr/local/bin/python
# Code Fights Float Range Problem
from itertools import count, takewhile
def floatRange(start, stop, step):
gen = takewhile(lambda x: x < stop, count(start, step))
return list(gen)
def main():
tests = [
[-0.9, 0.45, 0.2, [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3]],
[1.5, 1.5, 10, []],
[1, 2, 1.5, [1]],
[-21.11, 21.11, 1.11,
[-21.11, -20, -18.89, -17.78, -16.67, -15.56, -14.45, -13.34,
-12.23, -11.12, -10.01, -8.9, -7.79, -6.68, -5.57, -4.46, -3.35,
-2.24, -1.13, -0.02, 1.09, 2.2, 3.31, 4.42, 5.53, 6.64, 7.75,
8.86, 9.97, 11.08, 12.19, 13.3, 14.41, 15.52, 16.63, 17.74, 18.85,
19.96, 21.07]],
[0, 1, 0.5,
[0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]]
]
for t in tests:
res = floatRange(t[0], t[1], t[2])
if t[3] == res:
print("PASSED: floatRange({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: floatRange({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, t[3]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights float range problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Float Range Problem
from itertools import count, takewhile
def floatRange(start, stop, step):
gen = takewhile(lambda x: x < stop, count(start, step))
return list(gen)
def main():
tests = [
[-0.9, 0.45, 0.2, [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3]],
[1.5, 1.5, 10, []],
[1, 2, 1.5, [1]],
[-21.11, 21.11, 1.11,
[-21.11, -20, -18.89, -17.78, -16.67, -15.56, -14.45, -13.34,
-12.23, -11.12, -10.01, -8.9, -7.79, -6.68, -5.57, -4.46, -3.35,
-2.24, -1.13, -0.02, 1.09, 2.2, 3.31, 4.42, 5.53, 6.64, 7.75,
8.86, 9.97, 11.08, 12.19, 13.3, 14.41, 15.52, 16.63, 17.74, 18.85,
19.96, 21.07]],
[0, 1, 0.5,
[0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]]
]
for t in tests:
res = floatRange(t[0], t[1], t[2])
if t[3] == res:
print("PASSED: floatRange({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: floatRange({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, t[3]))
if __name__ == '__main__':
main()
|
Solve Code Fights float range problem#!/usr/local/bin/python
# Code Fights Float Range Problem
from itertools import count, takewhile
def floatRange(start, stop, step):
gen = takewhile(lambda x: x < stop, count(start, step))
return list(gen)
def main():
tests = [
[-0.9, 0.45, 0.2, [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3]],
[1.5, 1.5, 10, []],
[1, 2, 1.5, [1]],
[-21.11, 21.11, 1.11,
[-21.11, -20, -18.89, -17.78, -16.67, -15.56, -14.45, -13.34,
-12.23, -11.12, -10.01, -8.9, -7.79, -6.68, -5.57, -4.46, -3.35,
-2.24, -1.13, -0.02, 1.09, 2.2, 3.31, 4.42, 5.53, 6.64, 7.75,
8.86, 9.97, 11.08, 12.19, 13.3, 14.41, 15.52, 16.63, 17.74, 18.85,
19.96, 21.07]],
[0, 1, 0.5,
[0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]]
]
for t in tests:
res = floatRange(t[0], t[1], t[2])
if t[3] == res:
print("PASSED: floatRange({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: floatRange({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, t[3]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights float range problem<commit_after>#!/usr/local/bin/python
# Code Fights Float Range Problem
from itertools import count, takewhile
def floatRange(start, stop, step):
gen = takewhile(lambda x: x < stop, count(start, step))
return list(gen)
def main():
tests = [
[-0.9, 0.45, 0.2, [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3]],
[1.5, 1.5, 10, []],
[1, 2, 1.5, [1]],
[-21.11, 21.11, 1.11,
[-21.11, -20, -18.89, -17.78, -16.67, -15.56, -14.45, -13.34,
-12.23, -11.12, -10.01, -8.9, -7.79, -6.68, -5.57, -4.46, -3.35,
-2.24, -1.13, -0.02, 1.09, 2.2, 3.31, 4.42, 5.53, 6.64, 7.75,
8.86, 9.97, 11.08, 12.19, 13.3, 14.41, 15.52, 16.63, 17.74, 18.85,
19.96, 21.07]],
[0, 1, 0.5,
[0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]]
]
for t in tests:
res = floatRange(t[0], t[1], t[2])
if t[3] == res:
print("PASSED: floatRange({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: floatRange({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, t[3]))
if __name__ == '__main__':
main()
|