Column schema (value ranges as reported for each column):

- commit — stringlengths 40 to 40
- old_file — stringlengths 4 to 118
- new_file — stringlengths 4 to 118
- old_contents — stringlengths 0 to 2.94k
- new_contents — stringlengths 1 to 4.43k
- subject — stringlengths 15 to 444
- message — stringlengths 16 to 3.45k
- lang — stringclasses, 1 value
- license — stringclasses, 13 values
- repos — stringlengths 5 to 43.2k
- prompt — stringlengths 17 to 4.58k
- response — stringlengths 1 to 4.43k
- prompt_tagged — stringlengths 58 to 4.62k
- response_tagged — stringlengths 1 to 4.43k
- text — stringlengths 132 to 7.29k
- text_tagged — stringlengths 173 to 7.33k
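For orientation, here is a minimal sketch of how a dataset with these columns could be loaded and inspected with the Hugging Face `datasets` library. The dataset identifier is a placeholder (no published name is stated here), and the column accesses simply follow the schema listed above.

```python
# Minimal sketch: loading and inspecting a dataset with the columns listed above.
# "your-org/commit-dataset" is a placeholder identifier, not a published dataset name.
from datasets import load_dataset

ds = load_dataset("your-org/commit-dataset", split="train")

row = ds[0]
print(row["commit"])          # 40-character commit hash
print(row["new_file"])        # path of the changed file
print(row["subject"])         # first line of the commit message
print(row["prompt"][:200])    # plain prompt; the *_tagged columns wrap it in <commit_before>/<commit_msg>/<commit_after> markers
print(row["response"][:200])  # new file contents produced by the commit
```

The sample rows below show each record's cells separated by `|` delimiters.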
f384a5d77cae62b6487d039e4e8dd51ef9cdc258
|
skyfield/tests/test_data_iers.py
|
skyfield/tests/test_data_iers.py
|
from numpy import array, inf
from skyfield.data.iers import _build_timescale_arrays
def test_build_timescale_arrays():
mjd = array([42046.00, 42047.00, 42048.00, 42049.00])
dut1 = array([-0.2942581, -0.2971424, 0.6999438, 0.6970539])
delta_t, leap_dates, leap_offsets = _build_timescale_arrays(mjd, dut1)
column_1, column_2 = delta_t
assert list(column_1) == [2442046.5005113888, 2442047.5005113888,
2442048.500522963, 2442049.500522963]
assert list(column_2) == [44.4782581, 44.481142399999996,
44.4840562, 44.4869461]
assert list(leap_dates) == [-inf, 2441317.5, 2441499.5, 2441683.5,
2442048.5, inf]
assert list(leap_offsets) == [10, 10, 10, 11, 12, 13]
|
Add explicit test for finals2000A.all converter
|
Add explicit test for finals2000A.all converter
|
Python
|
mit
|
skyfielders/python-skyfield,skyfielders/python-skyfield
|
Add explicit test for finals2000A.all converter
|
from numpy import array, inf
from skyfield.data.iers import _build_timescale_arrays
def test_build_timescale_arrays():
mjd = array([42046.00, 42047.00, 42048.00, 42049.00])
dut1 = array([-0.2942581, -0.2971424, 0.6999438, 0.6970539])
delta_t, leap_dates, leap_offsets = _build_timescale_arrays(mjd, dut1)
column_1, column_2 = delta_t
assert list(column_1) == [2442046.5005113888, 2442047.5005113888,
2442048.500522963, 2442049.500522963]
assert list(column_2) == [44.4782581, 44.481142399999996,
44.4840562, 44.4869461]
assert list(leap_dates) == [-inf, 2441317.5, 2441499.5, 2441683.5,
2442048.5, inf]
assert list(leap_offsets) == [10, 10, 10, 11, 12, 13]
|
<commit_before><commit_msg>Add explicit test for finals2000A.all converter<commit_after>
|
from numpy import array, inf
from skyfield.data.iers import _build_timescale_arrays
def test_build_timescale_arrays():
mjd = array([42046.00, 42047.00, 42048.00, 42049.00])
dut1 = array([-0.2942581, -0.2971424, 0.6999438, 0.6970539])
delta_t, leap_dates, leap_offsets = _build_timescale_arrays(mjd, dut1)
column_1, column_2 = delta_t
assert list(column_1) == [2442046.5005113888, 2442047.5005113888,
2442048.500522963, 2442049.500522963]
assert list(column_2) == [44.4782581, 44.481142399999996,
44.4840562, 44.4869461]
assert list(leap_dates) == [-inf, 2441317.5, 2441499.5, 2441683.5,
2442048.5, inf]
assert list(leap_offsets) == [10, 10, 10, 11, 12, 13]
|
Add explicit test for finals2000A.all converterfrom numpy import array, inf
from skyfield.data.iers import _build_timescale_arrays
def test_build_timescale_arrays():
mjd = array([42046.00, 42047.00, 42048.00, 42049.00])
dut1 = array([-0.2942581, -0.2971424, 0.6999438, 0.6970539])
delta_t, leap_dates, leap_offsets = _build_timescale_arrays(mjd, dut1)
column_1, column_2 = delta_t
assert list(column_1) == [2442046.5005113888, 2442047.5005113888,
2442048.500522963, 2442049.500522963]
assert list(column_2) == [44.4782581, 44.481142399999996,
44.4840562, 44.4869461]
assert list(leap_dates) == [-inf, 2441317.5, 2441499.5, 2441683.5,
2442048.5, inf]
assert list(leap_offsets) == [10, 10, 10, 11, 12, 13]
|
<commit_before><commit_msg>Add explicit test for finals2000A.all converter<commit_after>from numpy import array, inf
from skyfield.data.iers import _build_timescale_arrays
def test_build_timescale_arrays():
mjd = array([42046.00, 42047.00, 42048.00, 42049.00])
dut1 = array([-0.2942581, -0.2971424, 0.6999438, 0.6970539])
delta_t, leap_dates, leap_offsets = _build_timescale_arrays(mjd, dut1)
column_1, column_2 = delta_t
assert list(column_1) == [2442046.5005113888, 2442047.5005113888,
2442048.500522963, 2442049.500522963]
assert list(column_2) == [44.4782581, 44.481142399999996,
44.4840562, 44.4869461]
assert list(leap_dates) == [-inf, 2441317.5, 2441499.5, 2441683.5,
2442048.5, inf]
assert list(leap_offsets) == [10, 10, 10, 11, 12, 13]
|
|
87cfabdc68d42f1a269741a38780ed6c38e53cb1
|
examples/debug/shape_shifter.py
|
examples/debug/shape_shifter.py
|
# Import analysis/plotting modules
import analysis.event
import plotting.image
import numpy as np
# Set new random seed
np.random.seed()
# Specify the facility
state = {}
state['Facility'] = 'Dummy'
def shape_shifter():
if(np.random.random() < 0.5):
return np.random.rand(8,8)
else:
return np.random.rand(16,2)
# Create a dummy facility
state['Dummy'] = {
# The event repetition rate of the dummy facility [Hz]
'Repetition Rate' : 10,
# Dictionary of data sources
'Data Sources': {
# The name of the data source.
'CCD': {
# A function that will generate the data for every event
'data': shape_shifter,
# The units to be used
'unit': 'ADU',
# The name of the category for this data source.
# All data sources are aggregated by type, which is the key
# used when asking for them in the analysis code.
'type': 'photonPixelDetectors'
}
}
}
# This function is called for every single event
# following the given recipy of analysis
def onEvent(evt):
# Processin rate [Hz]
analysis.event.printProcessingRate()
# Visualize detector image
plotting.image.plotImage(evt['photonPixelDetectors']['CCD'], send_rate=10)
|
Add a source with changing shape for debuging
|
Add a source with changing shape for debuging
|
Python
|
bsd-2-clause
|
FXIhub/hummingbird,FXIhub/hummingbird
|
Add a source with changing shape for debuging
|
# Import analysis/plotting modules
import analysis.event
import plotting.image
import numpy as np
# Set new random seed
np.random.seed()
# Specify the facility
state = {}
state['Facility'] = 'Dummy'
def shape_shifter():
if(np.random.random() < 0.5):
return np.random.rand(8,8)
else:
return np.random.rand(16,2)
# Create a dummy facility
state['Dummy'] = {
# The event repetition rate of the dummy facility [Hz]
'Repetition Rate' : 10,
# Dictionary of data sources
'Data Sources': {
# The name of the data source.
'CCD': {
# A function that will generate the data for every event
'data': shape_shifter,
# The units to be used
'unit': 'ADU',
# The name of the category for this data source.
# All data sources are aggregated by type, which is the key
# used when asking for them in the analysis code.
'type': 'photonPixelDetectors'
}
}
}
# This function is called for every single event
# following the given recipy of analysis
def onEvent(evt):
# Processin rate [Hz]
analysis.event.printProcessingRate()
# Visualize detector image
plotting.image.plotImage(evt['photonPixelDetectors']['CCD'], send_rate=10)
|
<commit_before><commit_msg>Add a source with changing shape for debuging<commit_after>
|
# Import analysis/plotting modules
import analysis.event
import plotting.image
import numpy as np
# Set new random seed
np.random.seed()
# Specify the facility
state = {}
state['Facility'] = 'Dummy'
def shape_shifter():
if(np.random.random() < 0.5):
return np.random.rand(8,8)
else:
return np.random.rand(16,2)
# Create a dummy facility
state['Dummy'] = {
# The event repetition rate of the dummy facility [Hz]
'Repetition Rate' : 10,
# Dictionary of data sources
'Data Sources': {
# The name of the data source.
'CCD': {
# A function that will generate the data for every event
'data': shape_shifter,
# The units to be used
'unit': 'ADU',
# The name of the category for this data source.
# All data sources are aggregated by type, which is the key
# used when asking for them in the analysis code.
'type': 'photonPixelDetectors'
}
}
}
# This function is called for every single event
# following the given recipy of analysis
def onEvent(evt):
# Processin rate [Hz]
analysis.event.printProcessingRate()
# Visualize detector image
plotting.image.plotImage(evt['photonPixelDetectors']['CCD'], send_rate=10)
|
Add a source with changing shape for debuging# Import analysis/plotting modules
import analysis.event
import plotting.image
import numpy as np
# Set new random seed
np.random.seed()
# Specify the facility
state = {}
state['Facility'] = 'Dummy'
def shape_shifter():
if(np.random.random() < 0.5):
return np.random.rand(8,8)
else:
return np.random.rand(16,2)
# Create a dummy facility
state['Dummy'] = {
# The event repetition rate of the dummy facility [Hz]
'Repetition Rate' : 10,
# Dictionary of data sources
'Data Sources': {
# The name of the data source.
'CCD': {
# A function that will generate the data for every event
'data': shape_shifter,
# The units to be used
'unit': 'ADU',
# The name of the category for this data source.
# All data sources are aggregated by type, which is the key
# used when asking for them in the analysis code.
'type': 'photonPixelDetectors'
}
}
}
# This function is called for every single event
# following the given recipy of analysis
def onEvent(evt):
# Processin rate [Hz]
analysis.event.printProcessingRate()
# Visualize detector image
plotting.image.plotImage(evt['photonPixelDetectors']['CCD'], send_rate=10)
|
<commit_before><commit_msg>Add a source with changing shape for debuging<commit_after># Import analysis/plotting modules
import analysis.event
import plotting.image
import numpy as np
# Set new random seed
np.random.seed()
# Specify the facility
state = {}
state['Facility'] = 'Dummy'
def shape_shifter():
if(np.random.random() < 0.5):
return np.random.rand(8,8)
else:
return np.random.rand(16,2)
# Create a dummy facility
state['Dummy'] = {
# The event repetition rate of the dummy facility [Hz]
'Repetition Rate' : 10,
# Dictionary of data sources
'Data Sources': {
# The name of the data source.
'CCD': {
# A function that will generate the data for every event
'data': shape_shifter,
# The units to be used
'unit': 'ADU',
# The name of the category for this data source.
# All data sources are aggregated by type, which is the key
# used when asking for them in the analysis code.
'type': 'photonPixelDetectors'
}
}
}
# This function is called for every single event
# following the given recipy of analysis
def onEvent(evt):
# Processin rate [Hz]
analysis.event.printProcessingRate()
# Visualize detector image
plotting.image.plotImage(evt['photonPixelDetectors']['CCD'], send_rate=10)
|
|
98d27c8366a39dceaecf558535ec8312e0c03e92
|
bluebottle/cms/migrations/0064_auto_20171220_1145.py
|
bluebottle/cms/migrations/0064_auto_20171220_1145.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-20 10:45
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.cms.models import Stat as RealStat
def migrate_stats_title(apps, schema_editor):
Stat = apps.get_model('cms', 'Stat')
for stat in Stat.objects.filter(title=''):
try:
stat.title = Stat.objects.filter(
block__language_code=stat.block.language_code,
block__placeholder=None,
type=stat.type
).exclude(title='').get().title
except Stat.DoesNotExist:
try:
stat.title = Stat.objects.filter(
type=stat.type, block__language_code=stat.block.language_code
).exclude(title='')[0].title
except IndexError:
if stat.type != 'manual':
stat.title = dict(RealStat.STAT_CHOICES)[stat.type]
stat.save()
class Migration(migrations.Migration):
dependencies = [
('cms', '0063_auto_20171204_1049'),
]
operations = [
migrations.RunPython(migrate_stats_title),
]
|
Fix incorrect migration that left out stat titles on result pages.
|
Fix incorrect migration that left out stat titles on result pages.
This should fix all stats on all tenants except for manual input.
BB-11474 #resolve
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Fix incorrect migration that left out stat titles on result pages.
This should fix all stats on all tenants except for manual input.
BB-11474 #resolve
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-20 10:45
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.cms.models import Stat as RealStat
def migrate_stats_title(apps, schema_editor):
Stat = apps.get_model('cms', 'Stat')
for stat in Stat.objects.filter(title=''):
try:
stat.title = Stat.objects.filter(
block__language_code=stat.block.language_code,
block__placeholder=None,
type=stat.type
).exclude(title='').get().title
except Stat.DoesNotExist:
try:
stat.title = Stat.objects.filter(
type=stat.type, block__language_code=stat.block.language_code
).exclude(title='')[0].title
except IndexError:
if stat.type != 'manual':
stat.title = dict(RealStat.STAT_CHOICES)[stat.type]
stat.save()
class Migration(migrations.Migration):
dependencies = [
('cms', '0063_auto_20171204_1049'),
]
operations = [
migrations.RunPython(migrate_stats_title),
]
|
<commit_before><commit_msg>Fix incorrect migration that left out stat titles on result pages.
This should fix all stats on all tenants except for manual input.
BB-11474 #resolve<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-20 10:45
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.cms.models import Stat as RealStat
def migrate_stats_title(apps, schema_editor):
Stat = apps.get_model('cms', 'Stat')
for stat in Stat.objects.filter(title=''):
try:
stat.title = Stat.objects.filter(
block__language_code=stat.block.language_code,
block__placeholder=None,
type=stat.type
).exclude(title='').get().title
except Stat.DoesNotExist:
try:
stat.title = Stat.objects.filter(
type=stat.type, block__language_code=stat.block.language_code
).exclude(title='')[0].title
except IndexError:
if stat.type != 'manual':
stat.title = dict(RealStat.STAT_CHOICES)[stat.type]
stat.save()
class Migration(migrations.Migration):
dependencies = [
('cms', '0063_auto_20171204_1049'),
]
operations = [
migrations.RunPython(migrate_stats_title),
]
|
Fix incorrect migration that left out stat titles on result pages.
This should fix all stats on all tenants except for manual input.
BB-11474 #resolve# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-20 10:45
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.cms.models import Stat as RealStat
def migrate_stats_title(apps, schema_editor):
Stat = apps.get_model('cms', 'Stat')
for stat in Stat.objects.filter(title=''):
try:
stat.title = Stat.objects.filter(
block__language_code=stat.block.language_code,
block__placeholder=None,
type=stat.type
).exclude(title='').get().title
except Stat.DoesNotExist:
try:
stat.title = Stat.objects.filter(
type=stat.type, block__language_code=stat.block.language_code
).exclude(title='')[0].title
except IndexError:
if stat.type != 'manual':
stat.title = dict(RealStat.STAT_CHOICES)[stat.type]
stat.save()
class Migration(migrations.Migration):
dependencies = [
('cms', '0063_auto_20171204_1049'),
]
operations = [
migrations.RunPython(migrate_stats_title),
]
|
<commit_before><commit_msg>Fix incorrect migration that left out stat titles on result pages.
This should fix all stats on all tenants except for manual input.
BB-11474 #resolve<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-20 10:45
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.cms.models import Stat as RealStat
def migrate_stats_title(apps, schema_editor):
Stat = apps.get_model('cms', 'Stat')
for stat in Stat.objects.filter(title=''):
try:
stat.title = Stat.objects.filter(
block__language_code=stat.block.language_code,
block__placeholder=None,
type=stat.type
).exclude(title='').get().title
except Stat.DoesNotExist:
try:
stat.title = Stat.objects.filter(
type=stat.type, block__language_code=stat.block.language_code
).exclude(title='')[0].title
except IndexError:
if stat.type != 'manual':
stat.title = dict(RealStat.STAT_CHOICES)[stat.type]
stat.save()
class Migration(migrations.Migration):
dependencies = [
('cms', '0063_auto_20171204_1049'),
]
operations = [
migrations.RunPython(migrate_stats_title),
]
|
|
df885b68ae5b97fe12c34d32b3187ab6326ae04a
|
gmetad-python/gmetad_element.py
|
gmetad-python/gmetad_element.py
|
#/*******************************************************************************
#* Portions Copyright (C) 2008 Novell, Inc. All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions are met:
#*
#* - Redistributions of source code must retain the above copyright notice,
#* this list of conditions and the following disclaimer.
#*
#* - Redistributions in binary form must reproduce the above copyright notice,
#* this list of conditions and the following disclaimer in the documentation
#* and/or other materials provided with the distribution.
#*
#* - Neither the name of Novell, Inc. nor the names of its
#* contributors may be used to endorse or promote products derived from this
#* software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
#* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#* ARE DISCLAIMED. IN NO EVENT SHALL Novell, Inc. OR THE CONTRIBUTORS
#* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#*
#* Authors: Matt Ryan (mrayn novell.com)
#* Brad Nicholes (bnicholes novell.com)
#******************************************************************************/
class Element:
def generateKey(vals):
if isinstance(vals,list):
return ':'.join(vals)
return vals
generateKey = staticmethod(generateKey)
def __init__(self, id, attrs):
self.id = id
for k,v in attrs.items():
self.__dict__[k.lower()] = v
self.children = {}
def __setitem__(self, k, v):
try:
self.children[k].update(v)
except KeyError:
self.children[k] = v
def __getitem__(self, k):
return self.children[k]
def update(self, elem):
for k in self.__dict__.keys():
if k == 'children' or k == 'id' or k == 'name':
continue
try:
self.__dict__[k] = elem.__dict__[k]
except ValueError:
pass
def __str__(self):
if self.__dict__.has_key('name'):
return Element.generateKey([self.id,self.name])
return Element.generateKey(self.id)
|
Split the element class from the data store so that it can be imported independently as needed
|
Split the element class from the data store so that it can be imported independently as needed
git-svn-id: 27e0aca8c7a52a9ae65dfba2e16879604119af8c@1336 93a4e39c-3214-0410-bb16-828d8e3bcd0f
|
Python
|
bsd-3-clause
|
fastly/ganglia,fastly/ganglia,fastly/ganglia,fastly/ganglia,fastly/ganglia
|
Split the element class from the data store so that it can be imported independently as needed
git-svn-id: 27e0aca8c7a52a9ae65dfba2e16879604119af8c@1336 93a4e39c-3214-0410-bb16-828d8e3bcd0f
|
#/*******************************************************************************
#* Portions Copyright (C) 2008 Novell, Inc. All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions are met:
#*
#* - Redistributions of source code must retain the above copyright notice,
#* this list of conditions and the following disclaimer.
#*
#* - Redistributions in binary form must reproduce the above copyright notice,
#* this list of conditions and the following disclaimer in the documentation
#* and/or other materials provided with the distribution.
#*
#* - Neither the name of Novell, Inc. nor the names of its
#* contributors may be used to endorse or promote products derived from this
#* software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
#* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#* ARE DISCLAIMED. IN NO EVENT SHALL Novell, Inc. OR THE CONTRIBUTORS
#* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#*
#* Authors: Matt Ryan (mrayn novell.com)
#* Brad Nicholes (bnicholes novell.com)
#******************************************************************************/
class Element:
def generateKey(vals):
if isinstance(vals,list):
return ':'.join(vals)
return vals
generateKey = staticmethod(generateKey)
def __init__(self, id, attrs):
self.id = id
for k,v in attrs.items():
self.__dict__[k.lower()] = v
self.children = {}
def __setitem__(self, k, v):
try:
self.children[k].update(v)
except KeyError:
self.children[k] = v
def __getitem__(self, k):
return self.children[k]
def update(self, elem):
for k in self.__dict__.keys():
if k == 'children' or k == 'id' or k == 'name':
continue
try:
self.__dict__[k] = elem.__dict__[k]
except ValueError:
pass
def __str__(self):
if self.__dict__.has_key('name'):
return Element.generateKey([self.id,self.name])
return Element.generateKey(self.id)
|
<commit_before><commit_msg>Split the element class from the data store so that it can be imported independently as needed
git-svn-id: 27e0aca8c7a52a9ae65dfba2e16879604119af8c@1336 93a4e39c-3214-0410-bb16-828d8e3bcd0f<commit_after>
|
#/*******************************************************************************
#* Portions Copyright (C) 2008 Novell, Inc. All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions are met:
#*
#* - Redistributions of source code must retain the above copyright notice,
#* this list of conditions and the following disclaimer.
#*
#* - Redistributions in binary form must reproduce the above copyright notice,
#* this list of conditions and the following disclaimer in the documentation
#* and/or other materials provided with the distribution.
#*
#* - Neither the name of Novell, Inc. nor the names of its
#* contributors may be used to endorse or promote products derived from this
#* software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
#* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#* ARE DISCLAIMED. IN NO EVENT SHALL Novell, Inc. OR THE CONTRIBUTORS
#* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#*
#* Authors: Matt Ryan (mrayn novell.com)
#* Brad Nicholes (bnicholes novell.com)
#******************************************************************************/
class Element:
def generateKey(vals):
if isinstance(vals,list):
return ':'.join(vals)
return vals
generateKey = staticmethod(generateKey)
def __init__(self, id, attrs):
self.id = id
for k,v in attrs.items():
self.__dict__[k.lower()] = v
self.children = {}
def __setitem__(self, k, v):
try:
self.children[k].update(v)
except KeyError:
self.children[k] = v
def __getitem__(self, k):
return self.children[k]
def update(self, elem):
for k in self.__dict__.keys():
if k == 'children' or k == 'id' or k == 'name':
continue
try:
self.__dict__[k] = elem.__dict__[k]
except ValueError:
pass
def __str__(self):
if self.__dict__.has_key('name'):
return Element.generateKey([self.id,self.name])
return Element.generateKey(self.id)
|
Split the element class from the data store so that it can be imported independently as needed
git-svn-id: 27e0aca8c7a52a9ae65dfba2e16879604119af8c@1336 93a4e39c-3214-0410-bb16-828d8e3bcd0f#/*******************************************************************************
#* Portions Copyright (C) 2008 Novell, Inc. All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions are met:
#*
#* - Redistributions of source code must retain the above copyright notice,
#* this list of conditions and the following disclaimer.
#*
#* - Redistributions in binary form must reproduce the above copyright notice,
#* this list of conditions and the following disclaimer in the documentation
#* and/or other materials provided with the distribution.
#*
#* - Neither the name of Novell, Inc. nor the names of its
#* contributors may be used to endorse or promote products derived from this
#* software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
#* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#* ARE DISCLAIMED. IN NO EVENT SHALL Novell, Inc. OR THE CONTRIBUTORS
#* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#*
#* Authors: Matt Ryan (mrayn novell.com)
#* Brad Nicholes (bnicholes novell.com)
#******************************************************************************/
class Element:
def generateKey(vals):
if isinstance(vals,list):
return ':'.join(vals)
return vals
generateKey = staticmethod(generateKey)
def __init__(self, id, attrs):
self.id = id
for k,v in attrs.items():
self.__dict__[k.lower()] = v
self.children = {}
def __setitem__(self, k, v):
try:
self.children[k].update(v)
except KeyError:
self.children[k] = v
def __getitem__(self, k):
return self.children[k]
def update(self, elem):
for k in self.__dict__.keys():
if k == 'children' or k == 'id' or k == 'name':
continue
try:
self.__dict__[k] = elem.__dict__[k]
except ValueError:
pass
def __str__(self):
if self.__dict__.has_key('name'):
return Element.generateKey([self.id,self.name])
return Element.generateKey(self.id)
|
<commit_before><commit_msg>Split the element class from the data store so that it can be imported independently as needed
git-svn-id: 27e0aca8c7a52a9ae65dfba2e16879604119af8c@1336 93a4e39c-3214-0410-bb16-828d8e3bcd0f<commit_after>#/*******************************************************************************
#* Portions Copyright (C) 2008 Novell, Inc. All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions are met:
#*
#* - Redistributions of source code must retain the above copyright notice,
#* this list of conditions and the following disclaimer.
#*
#* - Redistributions in binary form must reproduce the above copyright notice,
#* this list of conditions and the following disclaimer in the documentation
#* and/or other materials provided with the distribution.
#*
#* - Neither the name of Novell, Inc. nor the names of its
#* contributors may be used to endorse or promote products derived from this
#* software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
#* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
#* ARE DISCLAIMED. IN NO EVENT SHALL Novell, Inc. OR THE CONTRIBUTORS
#* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
#* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
#* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
#* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
#* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#*
#* Authors: Matt Ryan (mrayn novell.com)
#* Brad Nicholes (bnicholes novell.com)
#******************************************************************************/
class Element:
def generateKey(vals):
if isinstance(vals,list):
return ':'.join(vals)
return vals
generateKey = staticmethod(generateKey)
def __init__(self, id, attrs):
self.id = id
for k,v in attrs.items():
self.__dict__[k.lower()] = v
self.children = {}
def __setitem__(self, k, v):
try:
self.children[k].update(v)
except KeyError:
self.children[k] = v
def __getitem__(self, k):
return self.children[k]
def update(self, elem):
for k in self.__dict__.keys():
if k == 'children' or k == 'id' or k == 'name':
continue
try:
self.__dict__[k] = elem.__dict__[k]
except ValueError:
pass
def __str__(self):
if self.__dict__.has_key('name'):
return Element.generateKey([self.id,self.name])
return Element.generateKey(self.id)
|
|
8930434e0e4a079c855ed4beb7d2304ebab78b45
|
tests/test_content_type_resolvers.py
|
tests/test_content_type_resolvers.py
|
import pytest
from odinweb import content_type_resolvers
from odinweb.testing import MockRequest
@pytest.mark.parametrize('resolver, args, http_request, expected', (
# Accepts Header
(content_type_resolvers.accepts_header, (), MockRequest(headers={'accepts': 'application/json'}), 'application/json'),
(content_type_resolvers.accepts_header, (), MockRequest(headers={'content-type': 'application/json'}), None),
# Content type header
(content_type_resolvers.content_type_header, (), MockRequest(headers={'accepts': 'application/json'}), None),
(content_type_resolvers.content_type_header, (), MockRequest(headers={'content-type': 'application/json'}), 'application/json'),
# Specific default
(content_type_resolvers.specific_default, ('application/json',), MockRequest(headers={'accepts': 'text/html'}), 'application/json'),
(content_type_resolvers.specific_default, ('application/json',), MockRequest(headers={'content-type': 'text/html'}), 'application/json'),
(content_type_resolvers.specific_default, ('application/json',), MockRequest(), 'application/json'),
))
def test_resolvers(resolver, args, http_request, expected):
instance = resolver(*args)
actual = instance(http_request)
assert actual == expected
|
Complete test coverage for content_type_resolvers
|
Complete test coverage for content_type_resolvers
|
Python
|
bsd-3-clause
|
python-odin/odinweb,python-odin/odinweb
|
Complete test coverage for content_type_resolvers
|
import pytest
from odinweb import content_type_resolvers
from odinweb.testing import MockRequest
@pytest.mark.parametrize('resolver, args, http_request, expected', (
# Accepts Header
(content_type_resolvers.accepts_header, (), MockRequest(headers={'accepts': 'application/json'}), 'application/json'),
(content_type_resolvers.accepts_header, (), MockRequest(headers={'content-type': 'application/json'}), None),
# Content type header
(content_type_resolvers.content_type_header, (), MockRequest(headers={'accepts': 'application/json'}), None),
(content_type_resolvers.content_type_header, (), MockRequest(headers={'content-type': 'application/json'}), 'application/json'),
# Specific default
(content_type_resolvers.specific_default, ('application/json',), MockRequest(headers={'accepts': 'text/html'}), 'application/json'),
(content_type_resolvers.specific_default, ('application/json',), MockRequest(headers={'content-type': 'text/html'}), 'application/json'),
(content_type_resolvers.specific_default, ('application/json',), MockRequest(), 'application/json'),
))
def test_resolvers(resolver, args, http_request, expected):
instance = resolver(*args)
actual = instance(http_request)
assert actual == expected
|
<commit_before><commit_msg>Complete test coverage for content_type_resolvers<commit_after>
|
import pytest
from odinweb import content_type_resolvers
from odinweb.testing import MockRequest
@pytest.mark.parametrize('resolver, args, http_request, expected', (
# Accepts Header
(content_type_resolvers.accepts_header, (), MockRequest(headers={'accepts': 'application/json'}), 'application/json'),
(content_type_resolvers.accepts_header, (), MockRequest(headers={'content-type': 'application/json'}), None),
# Content type header
(content_type_resolvers.content_type_header, (), MockRequest(headers={'accepts': 'application/json'}), None),
(content_type_resolvers.content_type_header, (), MockRequest(headers={'content-type': 'application/json'}), 'application/json'),
# Specific default
(content_type_resolvers.specific_default, ('application/json',), MockRequest(headers={'accepts': 'text/html'}), 'application/json'),
(content_type_resolvers.specific_default, ('application/json',), MockRequest(headers={'content-type': 'text/html'}), 'application/json'),
(content_type_resolvers.specific_default, ('application/json',), MockRequest(), 'application/json'),
))
def test_resolvers(resolver, args, http_request, expected):
instance = resolver(*args)
actual = instance(http_request)
assert actual == expected
|
Complete test coverage for content_type_resolversimport pytest
from odinweb import content_type_resolvers
from odinweb.testing import MockRequest
@pytest.mark.parametrize('resolver, args, http_request, expected', (
# Accepts Header
(content_type_resolvers.accepts_header, (), MockRequest(headers={'accepts': 'application/json'}), 'application/json'),
(content_type_resolvers.accepts_header, (), MockRequest(headers={'content-type': 'application/json'}), None),
# Content type header
(content_type_resolvers.content_type_header, (), MockRequest(headers={'accepts': 'application/json'}), None),
(content_type_resolvers.content_type_header, (), MockRequest(headers={'content-type': 'application/json'}), 'application/json'),
# Specific default
(content_type_resolvers.specific_default, ('application/json',), MockRequest(headers={'accepts': 'text/html'}), 'application/json'),
(content_type_resolvers.specific_default, ('application/json',), MockRequest(headers={'content-type': 'text/html'}), 'application/json'),
(content_type_resolvers.specific_default, ('application/json',), MockRequest(), 'application/json'),
))
def test_resolvers(resolver, args, http_request, expected):
instance = resolver(*args)
actual = instance(http_request)
assert actual == expected
|
<commit_before><commit_msg>Complete test coverage for content_type_resolvers<commit_after>import pytest
from odinweb import content_type_resolvers
from odinweb.testing import MockRequest
@pytest.mark.parametrize('resolver, args, http_request, expected', (
# Accepts Header
(content_type_resolvers.accepts_header, (), MockRequest(headers={'accepts': 'application/json'}), 'application/json'),
(content_type_resolvers.accepts_header, (), MockRequest(headers={'content-type': 'application/json'}), None),
# Content type header
(content_type_resolvers.content_type_header, (), MockRequest(headers={'accepts': 'application/json'}), None),
(content_type_resolvers.content_type_header, (), MockRequest(headers={'content-type': 'application/json'}), 'application/json'),
# Specific default
(content_type_resolvers.specific_default, ('application/json',), MockRequest(headers={'accepts': 'text/html'}), 'application/json'),
(content_type_resolvers.specific_default, ('application/json',), MockRequest(headers={'content-type': 'text/html'}), 'application/json'),
(content_type_resolvers.specific_default, ('application/json',), MockRequest(), 'application/json'),
))
def test_resolvers(resolver, args, http_request, expected):
instance = resolver(*args)
actual = instance(http_request)
assert actual == expected
|
|
5a5b9a84d7ef42d5e5ae08ebbcf66719dce10e85
|
bin/update/deploy-dev.py
|
bin/update/deploy-dev.py
|
"""
Deployment for Bedrock on www-dev.allizom.org.
Requires commander (https://github.com/oremj/commander) which is installed on
the systems that need it.
"""
import os
import sys
# these files are symlinked as 'update.py' in the project root.
ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.append(ROOT)
sys.path.append(os.path.join(ROOT, 'bedrock', 'bin', 'update'))
from deploy_dev_base import * # noqa
|
Add chief deploy script for www-dev.
|
Add chief deploy script for www-dev.
|
Python
|
mpl-2.0
|
malena/bedrock,craigcook/bedrock,l-hedgehog/bedrock,schalkneethling/bedrock,Sancus/bedrock,analytics-pros/mozilla-bedrock,ckprice/bedrock,sgarrity/bedrock,mkmelin/bedrock,ericawright/bedrock,mermi/bedrock,pmclanahan/bedrock,mahinthjoe/bedrock,flodolo/bedrock,ericawright/bedrock,TheJJ100100/bedrock,chirilo/bedrock,hoosteeno/bedrock,davehunt/bedrock,marcoscaceres/bedrock,jgmize/bedrock,ckprice/bedrock,amjadm61/bedrock,Jobava/bedrock,Jobava/bedrock,l-hedgehog/bedrock,glogiotatidis/bedrock,Sancus/bedrock,mermi/bedrock,pmclanahan/bedrock,jacshfr/mozilla-bedrock,schalkneethling/bedrock,hoosteeno/bedrock,CSCI-462-01-2017/bedrock,petabyte/bedrock,amjadm61/bedrock,petabyte/bedrock,mozilla/bedrock,gerv/bedrock,schalkneethling/bedrock,davehunt/bedrock,ckprice/bedrock,analytics-pros/mozilla-bedrock,Jobava/bedrock,analytics-pros/mozilla-bedrock,SujaySKumar/bedrock,jacshfr/mozilla-bedrock,mahinthjoe/bedrock,amjadm61/bedrock,CSCI-462-01-2017/bedrock,yglazko/bedrock,glogiotatidis/bedrock,amjadm61/bedrock,TheoChevalier/bedrock,bensternthal/bedrock,jacshfr/mozilla-bedrock,kyoshino/bedrock,jpetto/bedrock,mermi/bedrock,kyoshino/bedrock,gauthierm/bedrock,Jobava/bedrock,pascalchevrel/bedrock,chirilo/bedrock,petabyte/bedrock,TheJJ100100/bedrock,marcoscaceres/bedrock,gauthierm/bedrock,mkmelin/bedrock,yglazko/bedrock,dudepare/bedrock,flodolo/bedrock,jpetto/bedrock,l-hedgehog/bedrock,malena/bedrock,andreadelrio/bedrock,Sancus/bedrock,TheoChevalier/bedrock,sylvestre/bedrock,schalkneethling/bedrock,rishiloyola/bedrock,gauthierm/bedrock,sgarrity/bedrock,TheoChevalier/bedrock,CSCI-462-01-2017/bedrock,alexgibson/bedrock,hoosteeno/bedrock,kyoshino/bedrock,ericawright/bedrock,yglazko/bedrock,jacshfr/mozilla-bedrock,yglazko/bedrock,alexgibson/bedrock,glogiotatidis/bedrock,ckprice/bedrock,gerv/bedrock,MichaelKohler/bedrock,gerv/bedrock,jpetto/bedrock,jgmize/bedrock,pascalchevrel/bedrock,CSCI-462-01-2017/bedrock,craigcook/bedrock,malena/bedrock,mozilla/bedrock,mozilla/bedrock,sylvestre/bedrock,chirilo/bedrock,marcoscaceres/bedrock,pmclanahan/bedrock,mkmelin/bedrock,mozilla/bedrock,dudepare/bedrock,mahinthjoe/bedrock,petabyte/bedrock,craigcook/bedrock,amjadm61/bedrock,bensternthal/bedrock,MichaelKohler/bedrock,davehunt/bedrock,Sancus/bedrock,hoosteeno/bedrock,dudepare/bedrock,sylvestre/bedrock,jacshfr/mozilla-bedrock,gerv/bedrock,jgmize/bedrock,jpetto/bedrock,bensternthal/bedrock,marcoscaceres/bedrock,pascalchevrel/bedrock,sgarrity/bedrock,flodolo/bedrock,craigcook/bedrock,jgmize/bedrock,malena/bedrock,rishiloyola/bedrock,chirilo/bedrock,SujaySKumar/bedrock,glogiotatidis/bedrock,pascalchevrel/bedrock,analytics-pros/mozilla-bedrock,ericawright/bedrock,alexgibson/bedrock,bensternthal/bedrock,andreadelrio/bedrock,TheoChevalier/bedrock,mkmelin/bedrock,mahinthjoe/bedrock,SujaySKumar/bedrock,andreadelrio/bedrock,MichaelKohler/bedrock,davehunt/bedrock,dudepare/bedrock,sylvestre/bedrock,TheJJ100100/bedrock,rishiloyola/bedrock,pmclanahan/bedrock,flodolo/bedrock,gauthierm/bedrock,mermi/bedrock,alexgibson/bedrock,TheJJ100100/bedrock,kyoshino/bedrock,sgarrity/bedrock,andreadelrio/bedrock,l-hedgehog/bedrock,SujaySKumar/bedrock,MichaelKohler/bedrock,rishiloyola/bedrock
|
Add chief deploy script for www-dev.
|
"""
Deployment for Bedrock on www-dev.allizom.org.
Requires commander (https://github.com/oremj/commander) which is installed on
the systems that need it.
"""
import os
import sys
# these files are symlinked as 'update.py' in the project root.
ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.append(ROOT)
sys.path.append(os.path.join(ROOT, 'bedrock', 'bin', 'update'))
from deploy_dev_base import * # noqa
|
<commit_before><commit_msg>Add chief deploy script for www-dev.<commit_after>
|
"""
Deployment for Bedrock on www-dev.allizom.org.
Requires commander (https://github.com/oremj/commander) which is installed on
the systems that need it.
"""
import os
import sys
# these files are symlinked as 'update.py' in the project root.
ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.append(ROOT)
sys.path.append(os.path.join(ROOT, 'bedrock', 'bin', 'update'))
from deploy_dev_base import * # noqa
|
Add chief deploy script for www-dev."""
Deployment for Bedrock on www-dev.allizom.org.
Requires commander (https://github.com/oremj/commander) which is installed on
the systems that need it.
"""
import os
import sys
# these files are symlinked as 'update.py' in the project root.
ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.append(ROOT)
sys.path.append(os.path.join(ROOT, 'bedrock', 'bin', 'update'))
from deploy_dev_base import * # noqa
|
<commit_before><commit_msg>Add chief deploy script for www-dev.<commit_after>"""
Deployment for Bedrock on www-dev.allizom.org.
Requires commander (https://github.com/oremj/commander) which is installed on
the systems that need it.
"""
import os
import sys
# these files are symlinked as 'update.py' in the project root.
ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.append(ROOT)
sys.path.append(os.path.join(ROOT, 'bedrock', 'bin', 'update'))
from deploy_dev_base import * # noqa
|
|
1f38e1fbba7e83d35903587245c8f8e09cec9965
|
tool/zeroconf_ssh.py
|
tool/zeroconf_ssh.py
|
#!/usr/bin/python
import socket
import time
from zeroconf import *
def main():
print "Register SSH service ..."
service_type = "_ssh._tcp.local."
info = ServiceInfo(service_type,
"RPi3." + service_type,
socket.inet_aton("127.0.0.1"), 22,
0, 0, "", None)
zc = Zeroconf()
zc.register_service(info)
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
print("Unregistering ...")
zc.unregister_service(info)
zc.close()
if __name__ == '__main__':
main()
|
Add script to broadcast local ip via mdns, for easy to find the pi address without monitor
|
Add script to broadcast local ip via mdns, for easy to find the pi address without monitor
|
Python
|
apache-2.0
|
TimonLio/rex-pi,TimonLio/rex-pi
|
Add script to broadcast local ip via mdns, for easy to find the pi address without monitor
|
#!/usr/bin/python
import socket
import time
from zeroconf import *
def main():
print "Register SSH service ..."
service_type = "_ssh._tcp.local."
info = ServiceInfo(service_type,
"RPi3." + service_type,
socket.inet_aton("127.0.0.1"), 22,
0, 0, "", None)
zc = Zeroconf()
zc.register_service(info)
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
print("Unregistering ...")
zc.unregister_service(info)
zc.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to broadcast local ip via mdns, for easy to find the pi address without monitor<commit_after>
|
#!/usr/bin/python
import socket
import time
from zeroconf import *
def main():
print "Register SSH service ..."
service_type = "_ssh._tcp.local."
info = ServiceInfo(service_type,
"RPi3." + service_type,
socket.inet_aton("127.0.0.1"), 22,
0, 0, "", None)
zc = Zeroconf()
zc.register_service(info)
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
print("Unregistering ...")
zc.unregister_service(info)
zc.close()
if __name__ == '__main__':
main()
|
Add script to broadcast local ip via mdns, for easy to find the pi address without monitor#!/usr/bin/python
import socket
import time
from zeroconf import *
def main():
print "Register SSH service ..."
service_type = "_ssh._tcp.local."
info = ServiceInfo(service_type,
"RPi3." + service_type,
socket.inet_aton("127.0.0.1"), 22,
0, 0, "", None)
zc = Zeroconf()
zc.register_service(info)
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
print("Unregistering ...")
zc.unregister_service(info)
zc.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to broadcast local ip via mdns, for easy to find the pi address without monitor<commit_after>#!/usr/bin/python
import socket
import time
from zeroconf import *
def main():
print "Register SSH service ..."
service_type = "_ssh._tcp.local."
info = ServiceInfo(service_type,
"RPi3." + service_type,
socket.inet_aton("127.0.0.1"), 22,
0, 0, "", None)
zc = Zeroconf()
zc.register_service(info)
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
finally:
print("Unregistering ...")
zc.unregister_service(info)
zc.close()
if __name__ == '__main__':
main()
|
|
f83d04e389fdac327f8347f30bd2532cb26532ee
|
mongonaut/sites.py
|
mongonaut/sites.py
|
#from django.core.paginator import Paginate
try:
import floppyforms as forms
except ImportError:
from django import forms
# TODO add default widgets
class BaseMongoAdmin(object):
search_fields = []
#This shows up on the DocumentListView of the Posts
list_actions = []
# This shows up in the DocumentDetailView of the Posts.
document_actions = []
# shows up on a particular field
field_actions = {}
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
class MongoAdmin(BaseMongoAdmin):
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
save_as = False
save_on_top = False
#paginator = Paginator
inlines = []
|
#from django.core.paginator import Paginate
try:
import floppyforms as forms
except ImportError:
from django import forms
# TODO add default widgets
class BaseMongoAdmin(object):
search_fields = []
#This shows up on the DocumentListView of the Posts
list_actions = []
# This shows up in the DocumentDetailView of the Posts.
document_actions = []
# shows up on a particular field
field_actions = {}
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the mongonaut site.
"""
return request.user.is_active
def has_staff_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the mongonaut site.
"""
return request.user.is_active and request.user.is_staff
class MongoAdmin(BaseMongoAdmin):
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
save_as = False
save_on_top = False
#paginator = Paginator
inlines = []
|
Add staff and user permission to views
|
Add staff and user permission to views
|
Python
|
mit
|
pydanny/django-mongonaut,lchsk/django-mongonaut,jazzband/django-mongonaut,pydanny/django-mongonaut,pydanny/django-mongonaut,jazzband/django-mongonaut,lchsk/django-mongonaut,lchsk/django-mongonaut,jazzband/django-mongonaut
|
#from django.core.paginator import Paginate
try:
import floppyforms as forms
except ImportError:
from django import forms
# TODO add default widgets
class BaseMongoAdmin(object):
search_fields = []
#This shows up on the DocumentListView of the Posts
list_actions = []
# This shows up in the DocumentDetailView of the Posts.
document_actions = []
# shows up on a particular field
field_actions = {}
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
class MongoAdmin(BaseMongoAdmin):
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
save_as = False
save_on_top = False
#paginator = Paginator
inlines = []Add staff and user permission to views
|
#from django.core.paginator import Paginate
try:
import floppyforms as forms
except ImportError:
from django import forms
# TODO add default widgets
class BaseMongoAdmin(object):
search_fields = []
#This shows up on the DocumentListView of the Posts
list_actions = []
# This shows up in the DocumentDetailView of the Posts.
document_actions = []
# shows up on a particular field
field_actions = {}
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the mongonaut site.
"""
return request.user.is_active
def has_staff_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the mongonaut site.
"""
return request.user.is_active and request.user.is_staff
class MongoAdmin(BaseMongoAdmin):
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
save_as = False
save_on_top = False
#paginator = Paginator
inlines = []
|
<commit_before>#from django.core.paginator import Paginate
try:
import floppyforms as forms
except ImportError:
from django import forms
# TODO add default widgets
class BaseMongoAdmin(object):
search_fields = []
#This shows up on the DocumentListView of the Posts
list_actions = []
# This shows up in the DocumentDetailView of the Posts.
document_actions = []
# shows up on a particular field
field_actions = {}
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
class MongoAdmin(BaseMongoAdmin):
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
save_as = False
save_on_top = False
#paginator = Paginator
inlines = []<commit_msg>Add staff and user permission to views<commit_after>
|
#from django.core.paginator import Paginate
try:
import floppyforms as forms
except ImportError:
from django import forms
# TODO add default widgets
class BaseMongoAdmin(object):
search_fields = []
#This shows up on the DocumentListView of the Posts
list_actions = []
# This shows up in the DocumentDetailView of the Posts.
document_actions = []
# shows up on a particular field
field_actions = {}
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the mongonaut site.
"""
return request.user.is_active
def has_staff_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the mongonaut site.
"""
return request.user.is_active and request.user.is_staff
class MongoAdmin(BaseMongoAdmin):
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
save_as = False
save_on_top = False
#paginator = Paginator
inlines = []
|
#from django.core.paginator import Paginate
try:
import floppyforms as forms
except ImportError:
from django import forms
# TODO add default widgets
class BaseMongoAdmin(object):
search_fields = []
#This shows up on the DocumentListView of the Posts
list_actions = []
# This shows up in the DocumentDetailView of the Posts.
document_actions = []
# shows up on a particular field
field_actions = {}
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
class MongoAdmin(BaseMongoAdmin):
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
save_as = False
save_on_top = False
#paginator = Paginator
inlines = []Add staff and user permission to views#from django.core.paginator import Paginate
try:
import floppyforms as forms
except ImportError:
from django import forms
# TODO add default widgets
class BaseMongoAdmin(object):
search_fields = []
#This shows up on the DocumentListView of the Posts
list_actions = []
# This shows up in the DocumentDetailView of the Posts.
document_actions = []
# shows up on a particular field
field_actions = {}
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the mongonaut site.
"""
return request.user.is_active
def has_staff_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the mongonaut site.
"""
return request.user.is_active and request.user.is_staff
class MongoAdmin(BaseMongoAdmin):
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
save_as = False
save_on_top = False
#paginator = Paginator
inlines = []
|
<commit_before>#from django.core.paginator import Paginate
try:
import floppyforms as forms
except ImportError:
from django import forms
# TODO add default widgets
class BaseMongoAdmin(object):
search_fields = []
#This shows up on the DocumentListView of the Posts
list_actions = []
# This shows up in the DocumentDetailView of the Posts.
document_actions = []
# shows up on a particular field
field_actions = {}
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
class MongoAdmin(BaseMongoAdmin):
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
save_as = False
save_on_top = False
#paginator = Paginator
inlines = []<commit_msg>Add staff and user permission to views<commit_after>#from django.core.paginator import Paginate
try:
import floppyforms as forms
except ImportError:
from django import forms
# TODO add default widgets
class BaseMongoAdmin(object):
search_fields = []
#This shows up on the DocumentListView of the Posts
list_actions = []
# This shows up in the DocumentDetailView of the Posts.
document_actions = []
# shows up on a particular field
field_actions = {}
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the mongonaut site.
"""
return request.user.is_active
def has_staff_permission(self, request):
"""
Returns True if the given HttpRequest comes from an active staff user who may view
*at least one* page in the mongonaut site.
"""
return request.user.is_active and request.user.is_staff
class MongoAdmin(BaseMongoAdmin):
list_display = ('__str__',)
list_display_links = ()
list_filter = ()
list_select_related = False
list_per_page = 100
list_max_show_all = 200
list_editable = ()
search_fields = ()
save_as = False
save_on_top = False
#paginator = Paginator
inlines = []
|
8d1bfe6b62d65c709c2feb91cc09b94d1c95f600
|
examples/lvm_cachepool.py
|
examples/lvm_cachepool.py
|
import os
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
b.create_device(lv)
# new cache pool
cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
b.create_device(cpool)
# write the new partitions to disk and format them as specified
b.do_it()
print(b.devicetree)
# attach the newly created cache pool to the "slow" LV
lv.attach_cache(cpool)
b.reset()
print(b.devicetree)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
Add LVM cache pool example
|
examples: Add LVM cache pool example
|
Python
|
lgpl-2.1
|
vojtechtrefny/blivet,vojtechtrefny/blivet,rvykydal/blivet,rvykydal/blivet
|
examples: Add LVM cache pool example
|
import os
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
b.create_device(lv)
# new cache pool
cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
b.create_device(cpool)
# write the new partitions to disk and format them as specified
b.do_it()
print(b.devicetree)
# attach the newly created cache pool to the "slow" LV
lv.attach_cache(cpool)
b.reset()
print(b.devicetree)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
<commit_before><commit_msg>examples: Add LVM cache pool example<commit_after>
|
import os
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
b.create_device(lv)
# new cache pool
cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
b.create_device(cpool)
# write the new partitions to disk and format them as specified
b.do_it()
print(b.devicetree)
# attach the newly created cache pool to the "slow" LV
lv.attach_cache(cpool)
b.reset()
print(b.devicetree)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
examples: Add LVM cache pool exampleimport os
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
b.create_device(lv)
# new cache pool
cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
b.create_device(cpool)
# write the new partitions to disk and format them as specified
b.do_it()
print(b.devicetree)
# attach the newly created cache pool to the "slow" LV
lv.attach_cache(cpool)
b.reset()
print(b.devicetree)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
<commit_before><commit_msg>examples: Add LVM cache pool example<commit_after>import os
import blivet
from blivet.size import Size
from blivet.util import set_up_logging, create_sparse_tempfile
set_up_logging()
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create a disk image file on which to create new devices
disk1_file = create_sparse_tempfile("disk1", Size("100GiB"))
b.disk_images["disk1"] = disk1_file
disk2_file = create_sparse_tempfile("disk2", Size("100GiB"))
b.disk_images["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.get_device_by_name("disk1")
disk2 = b.devicetree.get_device_by_name("disk2")
b.initialize_disk(disk1)
b.initialize_disk(disk2)
pv = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk1])
b.create_device(pv)
pv2 = b.new_partition(size=Size("50GiB"), fmt_type="lvmpv", parents=[disk2])
b.create_device(pv2)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.do_partitioning(b)
vg = b.new_vg(parents=[pv, pv2])
b.create_device(vg)
# new lv with base size 5GiB and growth up to 15GiB and an ext4 filesystem
lv = b.new_lv(fmt_type="ext4", size=Size("5GiB"), parents=[vg], name="cached")
b.create_device(lv)
# new cache pool
cpool = b.new_lv(size=Size("1 GiB"), parents=[vg], pvs=[pv2], cache_pool=True, name="fastlv")
b.create_device(cpool)
# write the new partitions to disk and format them as specified
b.do_it()
print(b.devicetree)
# attach the newly created cache pool to the "slow" LV
lv.attach_cache(cpool)
b.reset()
print(b.devicetree)
input("Check the state and hit ENTER to trigger cleanup")
finally:
b.devicetree.teardown_disk_images()
os.unlink(disk1_file)
os.unlink(disk2_file)
|
|
3fd0269492c83b4fbb05e88e27c2c9b42b4868fd
|
comics/comics/geekandpoke.py
|
comics/comics/geekandpoke.py
|
from comics.aggregator.crawler import BaseComicCrawler
from comics.meta.base import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Geek and Poke'
language = 'en'
url = 'http://www.geekandpoke.com/'
start_date = '2006-08-22'
history_capable_days = 32
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = 1
rights = 'Oliver Widder, CC BY-ND 2.0'
class ComicCrawler(BaseComicCrawler):
def crawl(self):
feed = self.parse_feed(
'http://geekandpoke.typepad.com/geekandpoke/atom.xml')
for entry in feed.for_date(self.pub_date):
self.url = entry.content0.src('img.asset-image')
self.title = entry.title
self.text = entry.content0.alt('img.asset-image')
|
Add crawler for 'Geek and Poke'
|
Add crawler for 'Geek and Poke'
|
Python
|
agpl-3.0
|
datagutten/comics,datagutten/comics,klette/comics,jodal/comics,jodal/comics,jodal/comics,klette/comics,jodal/comics,datagutten/comics,klette/comics,datagutten/comics
|
Add crawler for 'Geek and Poke'
|
from comics.aggregator.crawler import BaseComicCrawler
from comics.meta.base import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Geek and Poke'
language = 'en'
url = 'http://www.geekandpoke.com/'
start_date = '2006-08-22'
history_capable_days = 32
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = 1
rights = 'Oliver Widder, CC BY-ND 2.0'
class ComicCrawler(BaseComicCrawler):
def crawl(self):
feed = self.parse_feed(
'http://geekandpoke.typepad.com/geekandpoke/atom.xml')
for entry in feed.for_date(self.pub_date):
self.url = entry.content0.src('img.asset-image')
self.title = entry.title
self.text = entry.content0.alt('img.asset-image')
|
<commit_before><commit_msg>Add crawler for 'Geek and Poke'<commit_after>
|
from comics.aggregator.crawler import BaseComicCrawler
from comics.meta.base import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Geek and Poke'
language = 'en'
url = 'http://www.geekandpoke.com/'
start_date = '2006-08-22'
history_capable_days = 32
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = 1
rights = 'Oliver Widder, CC BY-ND 2.0'
class ComicCrawler(BaseComicCrawler):
def crawl(self):
feed = self.parse_feed(
'http://geekandpoke.typepad.com/geekandpoke/atom.xml')
for entry in feed.for_date(self.pub_date):
self.url = entry.content0.src('img.asset-image')
self.title = entry.title
self.text = entry.content0.alt('img.asset-image')
|
Add crawler for 'Geek and Poke'from comics.aggregator.crawler import BaseComicCrawler
from comics.meta.base import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Geek and Poke'
language = 'en'
url = 'http://www.geekandpoke.com/'
start_date = '2006-08-22'
history_capable_days = 32
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = 1
rights = 'Oliver Widder, CC BY-ND 2.0'
class ComicCrawler(BaseComicCrawler):
def crawl(self):
feed = self.parse_feed(
'http://geekandpoke.typepad.com/geekandpoke/atom.xml')
for entry in feed.for_date(self.pub_date):
self.url = entry.content0.src('img.asset-image')
self.title = entry.title
self.text = entry.content0.alt('img.asset-image')
|
<commit_before><commit_msg>Add crawler for 'Geek and Poke'<commit_after>from comics.aggregator.crawler import BaseComicCrawler
from comics.meta.base import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = 'Geek and Poke'
language = 'en'
url = 'http://www.geekandpoke.com/'
start_date = '2006-08-22'
history_capable_days = 32
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = 1
rights = 'Oliver Widder, CC BY-ND 2.0'
class ComicCrawler(BaseComicCrawler):
def crawl(self):
feed = self.parse_feed(
'http://geekandpoke.typepad.com/geekandpoke/atom.xml')
for entry in feed.for_date(self.pub_date):
self.url = entry.content0.src('img.asset-image')
self.title = entry.title
self.text = entry.content0.alt('img.asset-image')
|
|
936a01efe745e8596a23142895a385d47611ff15
|
tests/providers/test_bank.py
|
tests/providers/test_bank.py
|
# coding=utf-8
import re
import unittest
from faker import Faker
class TestNoNO(unittest.TestCase):
""" Tests the street address in no_NO locale """
def setUp(self):
self.factory = Faker('no_NO')
def test_bban(self):
bban = self.factory.bban()
assert re.match("\d{11}", bban)
|
Add a unit test for the no_NO bban
|
Add a unit test for the no_NO bban
|
Python
|
mit
|
joke2k/faker,joke2k/faker,danhuss/faker
|
Add a unit test for the no_NO bban
|
# coding=utf-8
import re
import unittest
from faker import Faker
class TestNoNO(unittest.TestCase):
""" Tests the street address in no_NO locale """
def setUp(self):
self.factory = Faker('no_NO')
def test_bban(self):
bban = self.factory.bban()
assert re.match("\d{11}", bban)
|
<commit_before><commit_msg>Add a unit test for the no_NO bban<commit_after>
|
# coding=utf-8
import re
import unittest
from faker import Faker
class TestNoNO(unittest.TestCase):
""" Tests the street address in no_NO locale """
def setUp(self):
self.factory = Faker('no_NO')
def test_bban(self):
bban = self.factory.bban()
assert re.match("\d{11}", bban)
|
Add a unit test for the no_NO bban# coding=utf-8
import re
import unittest
from faker import Faker
class TestNoNO(unittest.TestCase):
""" Tests the street address in no_NO locale """
def setUp(self):
self.factory = Faker('no_NO')
def test_bban(self):
bban = self.factory.bban()
assert re.match("\d{11}", bban)
|
<commit_before><commit_msg>Add a unit test for the no_NO bban<commit_after># coding=utf-8
import re
import unittest
from faker import Faker
class TestNoNO(unittest.TestCase):
""" Tests the street address in no_NO locale """
def setUp(self):
self.factory = Faker('no_NO')
def test_bban(self):
bban = self.factory.bban()
assert re.match("\d{11}", bban)
|
|
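A quick illustration (not taken from the commit; the sample digits are made up) of how the re.match(r"\d{11}", ...) check above behaves — it only anchors at the start of the string:
import re
assert re.match(r"\d{11}", "86011117947") is not None  # 11 made-up digits: matches
assert re.match(r"\d{11}", "8601111794") is None        # only 10 digits: no match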
bfda3309085919bd849e7478e1335a9da5dfc792
|
notebooks_preprocess.py
|
notebooks_preprocess.py
|
#!/usr/bin/env python
import json
import re
from pathlib import Path
def format_script_for_cell(path):
"""Read and format a .py file to be inserted into the json for a cell."""
header = '\n# Cell content replaced by load magic replacement.\n'
with open(path) as f:
return header + f.read()
def find_load_magics_in_cell(cell):
"""Find the load magics in a cell and return them as a list."""
load_magics = []
for cell_source_line in cell['source']:
m = re.match('#\s?%load.*', cell_source_line)
if m:
load_magics.append(m.group())
return load_magics
def get_cell_content_as_string(cell):
"""Return the cells source as a single string."""
return ''.join(cell['source']) + '\n'
def process_cell(path, cell):
"""Append the data from the load magics into the cell content."""
modified = False
# See if there are any load magics used
load_magics = find_load_magics_in_cell(cell)
# Replace the load magics with content from their respective files
for magic_string in load_magics:
path = Path(path)
script_path = path.parent / magic_string.split('load ')[1]
formatted_script = format_script_for_cell(script_path)
cell_str = get_cell_content_as_string(cell)
cell['source'] = cell_str + formatted_script
modified = True
return modified
# Recursively grab all notebooks and process them
notebooks = Path('notebooks').rglob('*.ipynb')
for notebook in notebooks:
if not str(notebook.parts[-2]).startswith('.'):
modified = False
# Read in the notebook as JSON data
print('Reading notebook: {}'.format(notebook))
with open(notebook, 'r') as f:
json_data = json.load(f)
# Process each cell in the file
for cell in json_data['cells']:
modified = process_cell(notebook, cell) or modified
# Write out the modified notebook
if modified:
print('Writing notebook: {}\n'.format(notebook))
with open(notebook, 'w') as outfile:
json.dump(json_data, outfile)
else:
print('Notebook not modified.\n')
|
Add notebook pre processing script to replace load magics.
|
Add notebook pre processing script to replace load magics.
|
Python
|
mit
|
julienchastang/unidata-python-workshop,Unidata/unidata-python-workshop,julienchastang/unidata-python-workshop
|
Add notebook pre processing script to replace load magics.
|
#!/usr/bin/env python
import json
import re
from pathlib import Path
def format_script_for_cell(path):
"""Read and format a .py file to be inserted into the json for a cell."""
header = '\n# Cell content replaced by load magic replacement.\n'
with open(path) as f:
return header + f.read()
def find_load_magics_in_cell(cell):
"""Find the load magics in a cell and return them as a list."""
load_magics = []
for cell_source_line in cell['source']:
m = re.match('#\s?%load.*', cell_source_line)
if m:
load_magics.append(m.group())
return load_magics
def get_cell_content_as_string(cell):
"""Return the cells source as a single string."""
return ''.join(cell['source']) + '\n'
def process_cell(path, cell):
"""Append the data from the load magics into the cell content."""
modified = False
# See if there are any load magics used
load_magics = find_load_magics_in_cell(cell)
# Replace the load magics with content from their respective files
for magic_string in load_magics:
path = Path(path)
script_path = path.parent / magic_string.split('load ')[1]
formatted_script = format_script_for_cell(script_path)
cell_str = get_cell_content_as_string(cell)
cell['source'] = cell_str + formatted_script
modified = True
return modified
# Recursively grab all notebooks and process them
notebooks = Path('notebooks').rglob('*.ipynb')
for notebook in notebooks:
if not str(notebook.parts[-2]).startswith('.'):
modified = False
# Read in the notebook as JSON data
print('Reading notebook: {}'.format(notebook))
with open(notebook, 'r') as f:
json_data = json.load(f)
# Process each cell in the file
for cell in json_data['cells']:
modified = process_cell(notebook, cell) or modified
# Write out the modified notebook
if modified:
print('Writing notebook: {}\n'.format(notebook))
with open(notebook, 'w') as outfile:
json.dump(json_data, outfile)
else:
print('Notebook not modified.\n')
|
<commit_before><commit_msg>Add notebook pre processing script to replace load magics.<commit_after>
|
#!/usr/bin/env python
import json
import re
from pathlib import Path
def format_script_for_cell(path):
"""Read and format a .py file to be inserted into the json for a cell."""
header = '\n# Cell content replaced by load magic replacement.\n'
with open(path) as f:
return header + f.read()
def find_load_magics_in_cell(cell):
"""Find the load magics in a cell and return them as a list."""
load_magics = []
for cell_source_line in cell['source']:
m = re.match('#\s?%load.*', cell_source_line)
if m:
load_magics.append(m.group())
return load_magics
def get_cell_content_as_string(cell):
"""Return the cells source as a single string."""
return ''.join(cell['source']) + '\n'
def process_cell(path, cell):
"""Append the data from the load magics into the cell content."""
modified = False
# See if there are any load magics used
load_magics = find_load_magics_in_cell(cell)
# Replace the load magics with content from their respective files
for magic_string in load_magics:
path = Path(path)
script_path = path.parent / magic_string.split('load ')[1]
formatted_script = format_script_for_cell(script_path)
cell_str = get_cell_content_as_string(cell)
cell['source'] = cell_str + formatted_script
modified = True
return modified
# Recursively grab all notebooks and process them
notebooks = Path('notebooks').rglob('*.ipynb')
for notebook in notebooks:
if not str(notebook.parts[-2]).startswith('.'):
modified = False
# Read in the notebook as JSON data
print('Reading notebook: {}'.format(notebook))
with open(notebook, 'r') as f:
json_data = json.load(f)
# Process each cell in the file
for cell in json_data['cells']:
modified = process_cell(notebook, cell) or modified
# Write out the modified notebook
if modified:
print('Writing notebook: {}\n'.format(notebook))
with open(notebook, 'w') as outfile:
json.dump(json_data, outfile)
else:
print('Notebook not modified.\n')
|
Add notebook pre processing script to replace load magics.#!/usr/bin/env python
import json
import re
from pathlib import Path
def format_script_for_cell(path):
"""Read and format a .py file to be inserted into the json for a cell."""
header = '\n# Cell content replaced by load magic replacement.\n'
with open(path) as f:
return header + f.read()
def find_load_magics_in_cell(cell):
"""Find the load magics in a cell and return them as a list."""
load_magics = []
for cell_source_line in cell['source']:
m = re.match('#\s?%load.*', cell_source_line)
if m:
load_magics.append(m.group())
return load_magics
def get_cell_content_as_string(cell):
"""Return the cells source as a single string."""
return ''.join(cell['source']) + '\n'
def process_cell(path, cell):
"""Append the data from the load magics into the cell content."""
modified = False
# See if there are any load magics used
load_magics = find_load_magics_in_cell(cell)
# Replace the load magics with content from their respective files
for magic_string in load_magics:
path = Path(path)
script_path = path.parent / magic_string.split('load ')[1]
formatted_script = format_script_for_cell(script_path)
cell_str = get_cell_content_as_string(cell)
cell['source'] = cell_str + formatted_script
modified = True
return modified
# Recursively grab all notebooks and process them
notebooks = Path('notebooks').rglob('*.ipynb')
for notebook in notebooks:
if not str(notebook.parts[-2]).startswith('.'):
modified = False
# Read in the notebook as JSON data
print('Reading notebook: {}'.format(notebook))
with open(notebook, 'r') as f:
json_data = json.load(f)
# Process each cell in the file
for cell in json_data['cells']:
modified = process_cell(notebook, cell) or modified
# Write out the modified notebook
if modified:
print('Writing notebook: {}\n'.format(notebook))
with open(notebook, 'w') as outfile:
json.dump(json_data, outfile)
else:
print('Notebook not modified.\n')
|
<commit_before><commit_msg>Add notebook pre processing script to replace load magics.<commit_after>#!/usr/bin/env python
import json
import re
from pathlib import Path
def format_script_for_cell(path):
"""Read and format a .py file to be inserted into the json for a cell."""
header = '\n# Cell content replaced by load magic replacement.\n'
with open(path) as f:
return header + f.read()
def find_load_magics_in_cell(cell):
"""Find the load magics in a cell and return them as a list."""
load_magics = []
for cell_source_line in cell['source']:
m = re.match('#\s?%load.*', cell_source_line)
if m:
load_magics.append(m.group())
return load_magics
def get_cell_content_as_string(cell):
"""Return the cells source as a single string."""
return ''.join(cell['source']) + '\n'
def process_cell(path, cell):
"""Append the data from the load magics into the cell content."""
modified = False
# See if there are any load magics used
load_magics = find_load_magics_in_cell(cell)
# Replace the load magics with content from their respective files
for magic_string in load_magics:
path = Path(path)
script_path = path.parent / magic_string.split('load ')[1]
formatted_script = format_script_for_cell(script_path)
cell_str = get_cell_content_as_string(cell)
cell['source'] = cell_str + formatted_script
modified = True
return modified
# Recursively grab all notebooks and process them
notebooks = Path('notebooks').rglob('*.ipynb')
for notebook in notebooks:
if not str(notebook.parts[-2]).startswith('.'):
modified = False
# Read in the notebook as JSON data
print('Reading notebook: {}'.format(notebook))
with open(notebook, 'r') as f:
json_data = json.load(f)
# Process each cell in the file
for cell in json_data['cells']:
modified = process_cell(notebook, cell) or modified
# Write out the modified notebook
if modified:
print('Writing notebook: {}\n'.format(notebook))
with open(notebook, 'w') as outfile:
json.dump(json_data, outfile)
else:
print('Notebook not modified.\n')
|
|
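As a side illustration (not part of the commit; the notebook paths are hypothetical), the load-magic pattern used above only picks up commented-out magics:
import re
pattern = '#\s?%load.*'
assert re.match(pattern, '# %load solutions/skewt.py')        # commented magic with a space: matched
assert re.match(pattern, '#%load solutions/skewt.py')         # commented magic without a space: matched
assert re.match(pattern, '%load solutions/skewt.py') is None  # a live magic is left alone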
3577db65eceabd6c5ceb9858f45faee65ce0bbdf
|
zforce.py
|
zforce.py
|
import zipfile
def bf_extract(zfile, password):
zip = zipfile.ZipFile(zfile)
try:
zip.setpassword(password)
zip.extractall()
except:
pass
finally:
zip.close()
if __name__ == "__main__":
bf_extract("spmv.zip", "ok")
|
Add function to extract zip file with password
|
Add function to extract zip file with password
|
Python
|
apache-2.0
|
alexst07/ZipBruteforce
|
Add function to extract zip file with password
|
import zipfile
def bf_extract(zfile, password):
zip = zipfile.ZipFile(zfile)
try:
zip.setpassword(password)
zip.extractall()
except:
pass
finally:
zip.close()
if __name__ == "__main__":
bf_extract("spmv.zip", "ok")
|
<commit_before><commit_msg>Add function to extract zip file with password<commit_after>
|
import zipfile
def bf_extract(zfile, password):
zip = zipfile.ZipFile(zfile)
try:
zip.setpassword(password)
zip.extractall()
except:
pass
finally:
zip.close()
if __name__ == "__main__":
bf_extract("spmv.zip", "ok")
|
Add function to extract zip file with passwordimport zipfile
def bf_extract(zfile, password):
zip = zipfile.ZipFile(zfile)
try:
zip.setpassword(password)
zip.extractall()
except:
pass
finally:
zip.close()
if __name__ == "__main__":
bf_extract("spmv.zip", "ok")
|
<commit_before><commit_msg>Add function to extract zip file with password<commit_after>import zipfile
def bf_extract(zfile, password):
zip = zipfile.ZipFile(zfile)
try:
zip.setpassword(password)
zip.extractall()
except:
pass
finally:
zip.close()
if __name__ == "__main__":
bf_extract("spmv.zip", "ok")
|
|
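A hedged sketch of driving the same idea from a wordlist (the wordlist path is an assumption, and the exception raised for a wrong password differs across Python/zipfile versions, hence the deliberately broad except):
import zipfile

def bf_wordlist(zfile, wordlist_path):
    # Try each candidate until extraction succeeds; return the working password or None.
    with open(wordlist_path) as wordlist:
        for candidate in wordlist:
            candidate = candidate.strip()
            try:
                with zipfile.ZipFile(zfile) as archive:
                    archive.extractall(pwd=candidate.encode('utf-8'))
                return candidate
            except Exception:
                continue
    return None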
d65502a67a3ae356f7af3c40f68139ca90b66ed0
|
py/sp_test.py
|
py/sp_test.py
|
#!/usr/bin/python
"Test spiffs filesystem with a range of parameters"
import spiffs
class SpiffsInstr(spiffs.SpiffsCharsBack):
"Keeps count of issued reads / writes / erases"
def __init__(self, *args, **kwargs):
self.reset_counters()
supe = super(SpiffsInstr, self)
self.super_read = supe.on_read
self.super_write = supe.on_write
self.super_erase = supe.on_erase
supe.__init__(*args, **kwargs)
def reset_counters(self):
self.read_requests = []
self.write_requests = []
self.erase_requests = []
def on_read(self, addr, size):
self.read_requests.append([addr, size])
return self.super_read(addr, size)
def on_write(self, addr, data):
self.write_requests.append([addr, len(data)])
return self.super_write(addr, data)
def on_erase(self, addr, size):
self.erase_requests.append([addr, size])
return self.super_erase(addr, size)
# Physical parameters
flash_size = 8*1024*1024
erase_size = 256
print "filesystem size =",flash_size
header = "log_block_size","log_page_size","reads","read_bytes","writes","written_bytes"
print '| %s |'%(' | '.join(header))
header2 = ['-'*len(x) for x in header]
print '| %s |'%(' | '.join(header2))
for log2_log_block_size in range(14,19):
log_block_size = 1<<log2_log_block_size
for log2_log_page_size in range(8,11):
log_page_size = 1<<log2_log_page_size
backing = ['\xff']*flash_size
s = SpiffsInstr(backing,
0,
erase_size,
log_page_size,
log_block_size)
s.dir()
#for i in range(100):
# with s.open("Test File %d.txt"%i,"w") as fd:
# fd.write('.'*1024*10)
s.unmount()
result = (log_block_size,log_page_size,
len(s.read_requests), sum(size for block,size in s.read_requests),
len(s.write_requests), sum(size for block,size in s.write_requests))
print '| %s |'%(' | '.join(map(str,result)))
|
Test script for parameter determination.
|
Test script for parameter determination.
see https://github.com/pellepl/spiffs/issues/169
|
Python
|
mit
|
pellepl/spiffs,pellepl/spiffs
|
Test script for parameter determination.
see https://github.com/pellepl/spiffs/issues/169
|
#!/usr/bin/python
"Test spiffs filesystem with a range of parameters"
import spiffs
class SpiffsInstr(spiffs.SpiffsCharsBack):
"Keeps count of issued reads / writes / erases"
def __init__(self, *args, **kwargs):
self.reset_counters()
supe = super(SpiffsInstr, self)
self.super_read = supe.on_read
self.super_write = supe.on_write
self.super_erase = supe.on_erase
supe.__init__(*args, **kwargs)
def reset_counters(self):
self.read_requests = []
self.write_requests = []
self.erase_requests = []
def on_read(self, addr, size):
self.read_requests.append([addr, size])
return self.super_read(addr, size)
def on_write(self, addr, data):
self.write_requests.append([addr, len(data)])
return self.super_write(addr, data)
def on_erase(self, addr, size):
self.erase_requests.append([addr, size])
return self.super_erase(addr, size)
# Physical parameters
flash_size = 8*1024*1024
erase_size = 256
print "filesystem size =",flash_size
header = "log_block_size","log_page_size","reads","read_bytes","writes","written_bytes"
print '| %s |'%(' | '.join(header))
header2 = ['-'*len(x) for x in header]
print '| %s |'%(' | '.join(header2))
for log2_log_block_size in range(14,19):
log_block_size = 1<<log2_log_block_size
for log2_log_page_size in range(8,11):
log_page_size = 1<<log2_log_page_size
backing = ['\xff']*flash_size
s = SpiffsInstr(backing,
0,
erase_size,
log_page_size,
log_block_size)
s.dir()
#for i in range(100):
# with s.open("Test File %d.txt"%i,"w") as fd:
# fd.write('.'*1024*10)
s.unmount()
result = (log_block_size,log_page_size,
len(s.read_requests), sum(size for block,size in s.read_requests),
len(s.write_requests), sum(size for block,size in s.write_requests))
print '| %s |'%(' | '.join(map(str,result)))
|
<commit_before><commit_msg>Test script for parameter determination.
see https://github.com/pellepl/spiffs/issues/169<commit_after>
|
#!/usr/bin/python
"Test spiffs filesystem with a range of parameters"
import spiffs
class SpiffsInstr(spiffs.SpiffsCharsBack):
"Keeps count of issued reads / writes / erases"
def __init__(self, *args, **kwargs):
self.reset_counters()
supe = super(SpiffsInstr, self)
self.super_read = supe.on_read
self.super_write = supe.on_write
self.super_erase = supe.on_erase
supe.__init__(*args, **kwargs)
def reset_counters(self):
self.read_requests = []
self.write_requests = []
self.erase_requests = []
def on_read(self, addr, size):
self.read_requests.append([addr, size])
return self.super_read(addr, size)
def on_write(self, addr, data):
self.write_requests.append([addr, len(data)])
return self.super_write(addr, data)
def on_erase(self, addr, size):
self.erase_requests.append([addr, size])
return self.super_erase(addr, size)
# Physical parameters
flash_size = 8*1024*1024
erase_size = 256
print "filesystem size =",flash_size
header = "log_block_size","log_page_size","reads","read_bytes","writes","written_bytes"
print '| %s |'%(' | '.join(header))
header2 = ['-'*len(x) for x in header]
print '| %s |'%(' | '.join(header2))
for log2_log_block_size in range(14,19):
log_block_size = 1<<log2_log_block_size
for log2_log_page_size in range(8,11):
log_page_size = 1<<log2_log_page_size
backing = ['\xff']*flash_size
s = SpiffsInstr(backing,
0,
erase_size,
log_page_size,
log_block_size)
s.dir()
#for i in range(100):
# with s.open("Test File %d.txt"%i,"w") as fd:
# fd.write('.'*1024*10)
s.unmount()
result = (log_block_size,log_page_size,
len(s.read_requests), sum(size for block,size in s.read_requests),
len(s.write_requests), sum(size for block,size in s.write_requests))
print '| %s |'%(' | '.join(map(str,result)))
|
Test script for parameter determination.
see https://github.com/pellepl/spiffs/issues/169#!/usr/bin/python
"Test spiffs filesystem with a range of parameters"
import spiffs
class SpiffsInstr(spiffs.SpiffsCharsBack):
"Keeps count of issued reads / writes / erases"
def __init__(self, *args, **kwargs):
self.reset_counters()
supe = super(SpiffsInstr, self)
self.super_read = supe.on_read
self.super_write = supe.on_write
self.super_erase = supe.on_erase
supe.__init__(*args, **kwargs)
def reset_counters(self):
self.read_requests = []
self.write_requests = []
self.erase_requests = []
def on_read(self, addr, size):
self.read_requests.append([addr, size])
return self.super_read(addr, size)
def on_write(self, addr, data):
self.write_requests.append([addr, len(data)])
return self.super_write(addr, data)
def on_erase(self, addr, size):
self.erase_requests.append([addr, size])
return self.super_erase(addr, size)
# Physical parameters
flash_size = 8*1024*1024
erase_size = 256
print "filesystem size =",flash_size
header = "log_block_size","log_page_size","reads","read_bytes","writes","written_bytes"
print '| %s |'%(' | '.join(header))
header2 = ['-'*len(x) for x in header]
print '| %s |'%(' | '.join(header2))
for log2_log_block_size in range(14,19):
log_block_size = 1<<log2_log_block_size
for log2_log_page_size in range(8,11):
log_page_size = 1<<log2_log_page_size
backing = ['\xff']*flash_size
s = SpiffsInstr(backing,
0,
erase_size,
log_page_size,
log_block_size)
s.dir()
#for i in range(100):
# with s.open("Test File %d.txt"%i,"w") as fd:
# fd.write('.'*1024*10)
s.unmount()
result = (log_block_size,log_page_size,
len(s.read_requests), sum(size for block,size in s.read_requests),
len(s.write_requests), sum(size for block,size in s.write_requests))
print '| %s |'%(' | '.join(map(str,result)))
|
<commit_before><commit_msg>Test script for parameter determination.
see https://github.com/pellepl/spiffs/issues/169<commit_after>#!/usr/bin/python
"Test spiffs filesystem with a range of parameters"
import spiffs
class SpiffsInstr(spiffs.SpiffsCharsBack):
"Keeps count of issued reads / writes / erases"
def __init__(self, *args, **kwargs):
self.reset_counters()
supe = super(SpiffsInstr, self)
self.super_read = supe.on_read
self.super_write = supe.on_write
self.super_erase = supe.on_erase
supe.__init__(*args, **kwargs)
def reset_counters(self):
self.read_requests = []
self.write_requests = []
self.erase_requests = []
def on_read(self, addr, size):
self.read_requests.append([addr, size])
return self.super_read(addr, size)
def on_write(self, addr, data):
self.write_requests.append([addr, len(data)])
return self.super_write(addr, data)
def on_erase(self, addr, size):
self.erase_requests.append([addr, size])
return self.super_erase(addr, size)
# Physical parameters
flash_size = 8*1024*1024
erase_size = 256
print "filesystem size =",flash_size
header = "log_block_size","log_page_size","reads","read_bytes","writes","written_bytes"
print '| %s |'%(' | '.join(header))
header2 = ['-'*len(x) for x in header]
print '| %s |'%(' | '.join(header2))
for log2_log_block_size in range(14,19):
log_block_size = 1<<log2_log_block_size
for log2_log_page_size in range(8,11):
log_page_size = 1<<log2_log_page_size
backing = ['\xff']*flash_size
s = SpiffsInstr(backing,
0,
erase_size,
log_page_size,
log_block_size)
s.dir()
#for i in range(100):
# with s.open("Test File %d.txt"%i,"w") as fd:
# fd.write('.'*1024*10)
s.unmount()
result = (log_block_size,log_page_size,
len(s.read_requests), sum(size for block,size in s.read_requests),
len(s.write_requests), sum(size for block,size in s.write_requests))
print '| %s |'%(' | '.join(map(str,result)))
|
|
a513c18975fb5711b0fb2cefbf4496379747daa9
|
cyrasterize/run_test.py
|
cyrasterize/run_test.py
|
import numpy as np
from cyrasterize import CyRasterizer
from numpy.testing import assert_allclose
def test_basic_random():
c = CyRasterizer(width=100, height=100)
points = np.array([[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]])
trilist = np.array([[0, 1, 2], [2, 3, 0]])
colours = np.random.uniform(size=(100, 100, 3))
tcoords = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
rgb_image, float_image, mask = c.rasterize(points, trilist, colours, tcoords)
assert_allclose(rgb_image, colours)
if __name__ == "__main__":
test_basic_random()
|
Add CyRasterize test to script
|
Add CyRasterize test to script
Duplicates nose test I had added to the cyrasterize package
|
Python
|
bsd-3-clause
|
menpo/conda-recipes,menpo/conda-recipes
|
Add CyRasterize test to script
Duplicates nose test I had added to the cyrasterize package
|
import numpy as np
from cyrasterize import CyRasterizer
from numpy.testing import assert_allclose
def test_basic_random():
c = CyRasterizer(width=100, height=100)
points = np.array([[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]])
trilist = np.array([[0, 1, 2], [2, 3, 0]])
colours = np.random.uniform(size=(100, 100, 3))
tcoords = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
rgb_image, float_image, mask = c.rasterize(points, trilist, colours, tcoords)
assert_allclose(rgb_image, colours)
if __name__ == "__main__":
test_basic_random()
|
<commit_before><commit_msg>Add CyRasterize test to script
Duplicates nose test I had added to the cyrasterize package<commit_after>
|
import numpy as np
from cyrasterize import CyRasterizer
from numpy.testing import assert_allclose
def test_basic_random():
c = CyRasterizer(width=100, height=100)
points = np.array([[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]])
trilist = np.array([[0, 1, 2], [2, 3, 0]])
colours = np.random.uniform(size=(100, 100, 3))
tcoords = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
rgb_image, float_image, mask = c.rasterize(points, trilist, colours, tcoords)
assert_allclose(rgb_image, colours)
if __name__ == "__main__":
test_basic_random()
|
Add CyRasterize test to script
Duplicates nose test I had added to the cyrasterize packageimport numpy as np
from cyrasterize import CyRasterizer
from numpy.testing import assert_allclose
def test_basic_random():
c = CyRasterizer(width=100, height=100)
points = np.array([[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]])
trilist = np.array([[0, 1, 2], [2, 3, 0]])
colours = np.random.uniform(size=(100, 100, 3))
tcoords = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
rgb_image, float_image, mask = c.rasterize(points, trilist, colours, tcoords)
assert_allclose(rgb_image, colours)
if __name__ == "__main__":
test_basic_random()
|
<commit_before><commit_msg>Add CyRasterize test to script
Duplicates nose test I had added to the cyrasterize package<commit_after>import numpy as np
from cyrasterize import CyRasterizer
from numpy.testing import assert_allclose
def test_basic_random():
c = CyRasterizer(width=100, height=100)
points = np.array([[-1, -1, 0], [1, -1, 0], [1, 1, 0], [-1, 1, 0]])
trilist = np.array([[0, 1, 2], [2, 3, 0]])
colours = np.random.uniform(size=(100, 100, 3))
tcoords = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
rgb_image, float_image, mask = c.rasterize(points, trilist, colours, tcoords)
assert_allclose(rgb_image, colours)
if __name__ == "__main__":
test_basic_random()
|
|
e87ba8f0a36c2d6ed9b058efd6fd91ed680af82c
|
twilio/contrib/jwt/__init__.py
|
twilio/contrib/jwt/__init__.py
|
""" JSON Web Token implementation
Minimum implementation based on this spec:
http://self-issued.info/docs/draft-jones-json-web-token-01.html
"""
import base64
import hashlib
import hmac
try:
import json
except ImportError:
import simplejson as json
__all__ = ['encode', 'decode', 'DecodeError']
class DecodeError(Exception): pass
signing_methods = {
'HS256': lambda msg, key: hmac.new(key, msg, hashlib.sha256).digest(),
'HS384': lambda msg, key: hmac.new(key, msg, hashlib.sha384).digest(),
'HS512': lambda msg, key: hmac.new(key, msg, hashlib.sha512).digest(),
}
def base64url_decode(input):
input += '=' * (4 - (len(input) % 4))
return base64.urlsafe_b64decode(input)
def base64url_encode(input):
return base64.urlsafe_b64encode(input).replace('=', '')
def header(jwt):
header_segment = jwt.split('.', 1)[0]
try:
return json.loads(base64url_decode(header_segment))
except (ValueError, TypeError):
raise DecodeError("Invalid header encoding")
def encode(payload, key, algorithm='HS256'):
segments = []
header = {"typ": "JWT", "alg": algorithm}
segments.append(base64url_encode(json.dumps(header)))
segments.append(base64url_encode(json.dumps(payload)))
signing_input = '.'.join(segments)
try:
ascii_key = unicode(key).encode('utf8')
signature = signing_methods[algorithm](signing_input, ascii_key)
except KeyError:
raise NotImplementedError("Algorithm not supported")
segments.append(base64url_encode(signature))
return '.'.join(segments)
def decode(jwt, key='', verify=True):
try:
signing_input, crypto_segment = jwt.rsplit('.', 1)
header_segment, payload_segment = signing_input.split('.', 1)
except ValueError:
raise DecodeError("Not enough segments")
try:
header = json.loads(base64url_decode(header_segment))
payload = json.loads(base64url_decode(payload_segment))
signature = base64url_decode(crypto_segment)
except (ValueError, TypeError):
raise DecodeError("Invalid segment encoding")
if verify:
try:
ascii_key = unicode(key).encode('utf8')
if not signature == signing_methods[header['alg']](signing_input, ascii_key):
raise DecodeError("Signature verification failed")
except KeyError:
raise DecodeError("Algorithm not supported")
return payload
|
Add jwt library in contrib
|
Add jwt library in contrib
|
Python
|
mit
|
supermanheng21/twilio-python,YeelerG/twilio-python,Rosy-S/twilio-python,Stackdriver/twilio-python,twilio/twilio-python,cinemapub/bright-response,clearcare/twilio-python,Stackdriver/twilio-python,johannakate/twilio-python,RobSpectre/twilio-python,Mobii/twilio-python,tysonholub/twilio-python,cinemapub/bright-response,bcorwin/twilio-python
|
Add jwt library in contrib
|
""" JSON Web Token implementation
Minimum implementation based on this spec:
http://self-issued.info/docs/draft-jones-json-web-token-01.html
"""
import base64
import hashlib
import hmac
try:
import json
except ImportError:
import simplejson as json
__all__ = ['encode', 'decode', 'DecodeError']
class DecodeError(Exception): pass
signing_methods = {
'HS256': lambda msg, key: hmac.new(key, msg, hashlib.sha256).digest(),
'HS384': lambda msg, key: hmac.new(key, msg, hashlib.sha384).digest(),
'HS512': lambda msg, key: hmac.new(key, msg, hashlib.sha512).digest(),
}
def base64url_decode(input):
input += '=' * (4 - (len(input) % 4))
return base64.urlsafe_b64decode(input)
def base64url_encode(input):
return base64.urlsafe_b64encode(input).replace('=', '')
def header(jwt):
header_segment = jwt.split('.', 1)[0]
try:
return json.loads(base64url_decode(header_segment))
except (ValueError, TypeError):
raise DecodeError("Invalid header encoding")
def encode(payload, key, algorithm='HS256'):
segments = []
header = {"typ": "JWT", "alg": algorithm}
segments.append(base64url_encode(json.dumps(header)))
segments.append(base64url_encode(json.dumps(payload)))
signing_input = '.'.join(segments)
try:
ascii_key = unicode(key).encode('utf8')
signature = signing_methods[algorithm](signing_input, ascii_key)
except KeyError:
raise NotImplementedError("Algorithm not supported")
segments.append(base64url_encode(signature))
return '.'.join(segments)
def decode(jwt, key='', verify=True):
try:
signing_input, crypto_segment = jwt.rsplit('.', 1)
header_segment, payload_segment = signing_input.split('.', 1)
except ValueError:
raise DecodeError("Not enough segments")
try:
header = json.loads(base64url_decode(header_segment))
payload = json.loads(base64url_decode(payload_segment))
signature = base64url_decode(crypto_segment)
except (ValueError, TypeError):
raise DecodeError("Invalid segment encoding")
if verify:
try:
ascii_key = unicode(key).encode('utf8')
if not signature == signing_methods[header['alg']](signing_input, ascii_key):
raise DecodeError("Signature verification failed")
except KeyError:
raise DecodeError("Algorithm not supported")
return payload
|
<commit_before><commit_msg>Add jwt library in contrib<commit_after>
|
""" JSON Web Token implementation
Minimum implementation based on this spec:
http://self-issued.info/docs/draft-jones-json-web-token-01.html
"""
import base64
import hashlib
import hmac
try:
import json
except ImportError:
import simplejson as json
__all__ = ['encode', 'decode', 'DecodeError']
class DecodeError(Exception): pass
signing_methods = {
'HS256': lambda msg, key: hmac.new(key, msg, hashlib.sha256).digest(),
'HS384': lambda msg, key: hmac.new(key, msg, hashlib.sha384).digest(),
'HS512': lambda msg, key: hmac.new(key, msg, hashlib.sha512).digest(),
}
def base64url_decode(input):
input += '=' * (4 - (len(input) % 4))
return base64.urlsafe_b64decode(input)
def base64url_encode(input):
return base64.urlsafe_b64encode(input).replace('=', '')
def header(jwt):
header_segment = jwt.split('.', 1)[0]
try:
return json.loads(base64url_decode(header_segment))
except (ValueError, TypeError):
raise DecodeError("Invalid header encoding")
def encode(payload, key, algorithm='HS256'):
segments = []
header = {"typ": "JWT", "alg": algorithm}
segments.append(base64url_encode(json.dumps(header)))
segments.append(base64url_encode(json.dumps(payload)))
signing_input = '.'.join(segments)
try:
ascii_key = unicode(key).encode('utf8')
signature = signing_methods[algorithm](signing_input, ascii_key)
except KeyError:
raise NotImplementedError("Algorithm not supported")
segments.append(base64url_encode(signature))
return '.'.join(segments)
def decode(jwt, key='', verify=True):
try:
signing_input, crypto_segment = jwt.rsplit('.', 1)
header_segment, payload_segment = signing_input.split('.', 1)
except ValueError:
raise DecodeError("Not enough segments")
try:
header = json.loads(base64url_decode(header_segment))
payload = json.loads(base64url_decode(payload_segment))
signature = base64url_decode(crypto_segment)
except (ValueError, TypeError):
raise DecodeError("Invalid segment encoding")
if verify:
try:
ascii_key = unicode(key).encode('utf8')
if not signature == signing_methods[header['alg']](signing_input, ascii_key):
raise DecodeError("Signature verification failed")
except KeyError:
raise DecodeError("Algorithm not supported")
return payload
|
Add jwt library in contrib""" JSON Web Token implementation
Minimum implementation based on this spec:
http://self-issued.info/docs/draft-jones-json-web-token-01.html
"""
import base64
import hashlib
import hmac
try:
import json
except ImportError:
import simplejson as json
__all__ = ['encode', 'decode', 'DecodeError']
class DecodeError(Exception): pass
signing_methods = {
'HS256': lambda msg, key: hmac.new(key, msg, hashlib.sha256).digest(),
'HS384': lambda msg, key: hmac.new(key, msg, hashlib.sha384).digest(),
'HS512': lambda msg, key: hmac.new(key, msg, hashlib.sha512).digest(),
}
def base64url_decode(input):
input += '=' * (4 - (len(input) % 4))
return base64.urlsafe_b64decode(input)
def base64url_encode(input):
return base64.urlsafe_b64encode(input).replace('=', '')
def header(jwt):
header_segment = jwt.split('.', 1)[0]
try:
return json.loads(base64url_decode(header_segment))
except (ValueError, TypeError):
raise DecodeError("Invalid header encoding")
def encode(payload, key, algorithm='HS256'):
segments = []
header = {"typ": "JWT", "alg": algorithm}
segments.append(base64url_encode(json.dumps(header)))
segments.append(base64url_encode(json.dumps(payload)))
signing_input = '.'.join(segments)
try:
ascii_key = unicode(key).encode('utf8')
signature = signing_methods[algorithm](signing_input, ascii_key)
except KeyError:
raise NotImplementedError("Algorithm not supported")
segments.append(base64url_encode(signature))
return '.'.join(segments)
def decode(jwt, key='', verify=True):
try:
signing_input, crypto_segment = jwt.rsplit('.', 1)
header_segment, payload_segment = signing_input.split('.', 1)
except ValueError:
raise DecodeError("Not enough segments")
try:
header = json.loads(base64url_decode(header_segment))
payload = json.loads(base64url_decode(payload_segment))
signature = base64url_decode(crypto_segment)
except (ValueError, TypeError):
raise DecodeError("Invalid segment encoding")
if verify:
try:
ascii_key = unicode(key).encode('utf8')
if not signature == signing_methods[header['alg']](signing_input, ascii_key):
raise DecodeError("Signature verification failed")
except KeyError:
raise DecodeError("Algorithm not supported")
return payload
|
<commit_before><commit_msg>Add jwt library in contrib<commit_after>""" JSON Web Token implementation
Minimum implementation based on this spec:
http://self-issued.info/docs/draft-jones-json-web-token-01.html
"""
import base64
import hashlib
import hmac
try:
import json
except ImportError:
import simplejson as json
__all__ = ['encode', 'decode', 'DecodeError']
class DecodeError(Exception): pass
signing_methods = {
'HS256': lambda msg, key: hmac.new(key, msg, hashlib.sha256).digest(),
'HS384': lambda msg, key: hmac.new(key, msg, hashlib.sha384).digest(),
'HS512': lambda msg, key: hmac.new(key, msg, hashlib.sha512).digest(),
}
def base64url_decode(input):
input += '=' * (4 - (len(input) % 4))
return base64.urlsafe_b64decode(input)
def base64url_encode(input):
return base64.urlsafe_b64encode(input).replace('=', '')
def header(jwt):
header_segment = jwt.split('.', 1)[0]
try:
return json.loads(base64url_decode(header_segment))
except (ValueError, TypeError):
raise DecodeError("Invalid header encoding")
def encode(payload, key, algorithm='HS256'):
segments = []
header = {"typ": "JWT", "alg": algorithm}
segments.append(base64url_encode(json.dumps(header)))
segments.append(base64url_encode(json.dumps(payload)))
signing_input = '.'.join(segments)
try:
ascii_key = unicode(key).encode('utf8')
signature = signing_methods[algorithm](signing_input, ascii_key)
except KeyError:
raise NotImplementedError("Algorithm not supported")
segments.append(base64url_encode(signature))
return '.'.join(segments)
def decode(jwt, key='', verify=True):
try:
signing_input, crypto_segment = jwt.rsplit('.', 1)
header_segment, payload_segment = signing_input.split('.', 1)
except ValueError:
raise DecodeError("Not enough segments")
try:
header = json.loads(base64url_decode(header_segment))
payload = json.loads(base64url_decode(payload_segment))
signature = base64url_decode(crypto_segment)
except (ValueError, TypeError):
raise DecodeError("Invalid segment encoding")
if verify:
try:
ascii_key = unicode(key).encode('utf8')
if not signature == signing_methods[header['alg']](signing_input, ascii_key):
raise DecodeError("Signature verification failed")
except KeyError:
raise DecodeError("Algorithm not supported")
return payload
|
|
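An illustrative round trip with the helpers above (the claim names and the 'secret' key are placeholders; the module targets Python 2, hence its use of unicode()):
token = encode({'iss': 'AC123', 'exp': 1325376000}, 'secret')
claims = decode(token, 'secret')
assert claims['iss'] == 'AC123'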
6e047e9a4e07c379bcb0c47d13fa91dec796858a
|
twitter-utils/search2stream.py
|
twitter-utils/search2stream.py
|
"""
Convert GET search (v1.1 or higher) from searchtweets.py to 'stream' format
as expected by twitter-streamer. This means it will enumerate each item in
the incoming JSON object's 'statuses' list, and output each on a separate
line.
"""
import sys
import simplejson as json
for line in sys.stdin:
try:
o = json.loads(line)
except json.JSONDecodeError:
sys.stderr.write("Parse error: %s\n" % line)
continue
for s in o['statuses']:
print json.dumps(s)
|
Convert search results to stream filter API format.
|
Convert search results to stream filter API format.
|
Python
|
mit
|
inactivist/twitter-utils
|
Convert search results to stream filter API format.
|
"""
Convert GET search (v1.1 or higher) from searchtweets.py to 'stream' format
as expected by twitter-streamer. This means it will enumerate each item in
the incoming JSON object's 'statuses' list, and output each on a separate
line.
"""
import sys
import simplejson as json
for line in sys.stdin:
try:
o = json.loads(line)
except json.JSONDecodeError:
sys.stderr.write("Parse error: %s\n" % line)
continue
for s in o['statuses']:
print json.dumps(s)
|
<commit_before><commit_msg>Convert search results to stream filter API format.<commit_after>
|
"""
Convert GET search (v1.1 or higher) from searchtweets.py to 'stream' format
as expected by twitter-streamer. This means it will enumerate each item in
the incoming JSON object's 'statuses' list, and output each on a separate
line.
"""
import sys
import simplejson as json
for line in sys.stdin:
try:
o = json.loads(line)
except json.JSONDecodeError:
sys.stderr.write("Parse error: %s\n" % line)
continue
for s in o['statuses']:
print json.dumps(s)
|
Convert search results to stream filter API format."""
Convert GET search (v1.1 or higher) from searchtweets.py to 'stream' format
as expected by twitter-streamer. This means it will enumerate each item in
the incoming JSON object's 'statuses' list, and output each on a separate
line.
"""
import sys
import simplejson as json
for line in sys.stdin:
try:
o = json.loads(line)
except json.JSONDecodeError:
sys.stderr.write("Parse error: %s\n" % line)
continue
for s in o['statuses']:
print json.dumps(s)
|
<commit_before><commit_msg>Convert search results to stream filter API format.<commit_after>"""
Convert GET search (v1.1 or higher) from searchtweets.py to 'stream' format
as expected by twitter-streamer. This means it will enumerate each item in
the incoming JSON object's 'statuses' list, and output each on a separate
line.
"""
import sys
import simplejson as json
for line in sys.stdin:
try:
o = json.loads(line)
except json.JSONDecodeError:
sys.stderr.write("Parse error: %s\n" % line)
continue
for s in o['statuses']:
print json.dumps(s)
|
|
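A quick illustration (minimal made-up payload) of the per-line transformation the script performs; a typical invocation would be: python search2stream.py < search_results.json > statuses.jsonl
import json
line = '{"statuses": [{"id": 1, "text": "hello"}, {"id": 2, "text": "world"}]}'
for status in json.loads(line)['statuses']:
    print(json.dumps(status))  # one status object per output line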
4866c643c0cb8f041dbdfa9db0611279ca4ef98d
|
plane_fit.py
|
plane_fit.py
|
from scipy.optimize import least_squares as lsq
import numpy as np
def myplane(p, x, y, z):
return(p[0] + p[1] * x + p[2] * y - z)
def plane_fit(x, y, z, robust=False):
"""
Fits a plane to data without any given uncertainties or weighting.
Arguments:
----------
x, y: float
x and y coordinates of the data
z: float
z-coordinates to which the values are fit
Returns:
--------
coefficients:
3-element vector with components [ z0 (constant offset) , grad_x, grad_y]
"""
x0, y0 = np.median(x), np.median(y)
dataz = np.c_[np.ones(x.size),
x-x0,
y-y0]
lsqcoeffs, _, _, _ = np.linalg.lstsq(dataz,z)
if robust:
outputs = lsq(myplane, np.r_[lsqcoeffs],
args=([x-x0,
y-y0, z]),
loss = 'soft_l1')
lsqcoeffs = outputs.x
return(lsqcoeffs)
|
Add a plane fitting routine
|
Add a plane fitting routine
|
Python
|
mit
|
low-sky/py-low-sky
|
Add a plane fitting routine
|
from scipy.optimize import least_squares as lsq
import numpy as np
def myplane(p, x, y, z):
return(p[0] + p[1] * x + p[2] * y - z)
def plane_fit(x, y, z, robust=False):
"""
Fits a plane to data without any given uncertainties or weighting.
Arguments:
----------
x, y: float
x and y coordinates of the data
z: float
z-coordinates to which the values are fit
Returns:
--------
coefficients:
3-element vector with components [ z0 (constant offset) , grad_x, grad_y]
"""
x0, y0 = np.median(x), np.median(y)
dataz = np.c_[np.ones(x.size),
x-x0,
y-y0]
lsqcoeffs, _, _, _ = np.linalg.lstsq(dataz,z)
if robust:
outputs = lsq(myplane, np.r_[lsqcoeffs],
args=([x-x0,
y-y0, z]),
loss = 'soft_l1')
lsqcoeffs = outputs.x
return(lsqcoeffs)
|
<commit_before><commit_msg>Add a plane fitting routine<commit_after>
|
from scipy.optimize import least_squares as lsq
import numpy as np
def myplane(p, x, y, z):
return(p[0] + p[1] * x + p[2] * y - z)
def plane_fit(x, y, z, robust=False):
"""
Fits a plane to data without any given uncertainties or weighting.
Arguments:
----------
x, y: float
x and y coordinates of the data
z: float
z-coordinates to which the values are fit
Returns:
--------
coefficients:
3-element vector with components [ z0 (constant offset) , grad_x, grad_y]
"""
x0, y0 = np.median(x), np.median(y)
dataz = np.c_[np.ones(x.size),
x-x0,
y-y0]
lsqcoeffs, _, _, _ = np.linalg.lstsq(dataz,z)
if robust:
outputs = lsq(myplane, np.r_[lsqcoeffs],
args=([x-x0,
y-y0, z]),
loss = 'soft_l1')
lsqcoeffs = outputs.x
return(lsqcoeffs)
|
Add a plane fitting routinefrom scipy.optimize import least_squares as lsq
import numpy as np
def myplane(p, x, y, z):
return(p[0] + p[1] * x + p[2] * y - z)
def plane_fit(x, y, z, robust=False):
"""
Fits a plane to data without any given uncertainties or weighting.
Arguments:
----------
x, y: float
x and y coordinates of the data
z: float
z-coordinates to which the values are fit
Returns:
--------
coefficients:
3-element vector with components [ z0 (constant offset) , grad_x, grad_y]
"""
x0, y0 = np.median(x), np.median(y)
dataz = np.c_[np.ones(x.size),
x-x0,
y-y0]
lsqcoeffs, _, _, _ = np.linalg.lstsq(dataz,z)
if robust:
outputs = lsq(myplane, np.r_[lsqcoeffs],
args=([x-x0,
y-y0, z]),
loss = 'soft_l1')
lsqcoeffs = outputs.x
return(lsqcoeffs)
|
<commit_before><commit_msg>Add a plane fitting routine<commit_after>from scipy.optimize import least_squares as lsq
import numpy as np
def myplane(p, x, y, z):
return(p[0] + p[1] * x + p[2] * y - z)
def plane_fit(x, y, z, robust=False):
"""
Fits a plane to data without any given uncertainties or weighting.
Arguments:
----------
x, y: float
x and y coordinates of the data
z: float
z-coordinates to which the values are fit
Returns:
--------
coefficients:
3-element vector with components [ z0 (constant offset) , grad_x, grad_y]
"""
x0, y0 = np.median(x), np.median(y)
dataz = np.c_[np.ones(x.size),
x-x0,
y-y0]
lsqcoeffs, _, _, _ = np.linalg.lstsq(dataz,z)
if robust:
outputs = lsq(myplane, np.r_[lsqcoeffs],
args=([x-x0,
y-y0, z]),
loss = 'soft_l1')
lsqcoeffs = outputs.x
return(lsqcoeffs)
|
|
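A quick usage sketch for the plane_fit routine above. The synthetic data, noise level, and the assumption that the function is importable as plane_fit.plane_fit are all illustrative and not part of the original commit.

    import numpy as np
    from plane_fit import plane_fit  # assumed module name

    # Synthetic tilted plane z = 2 + 0.5*x - 0.3*y with a little noise
    rng = np.random.default_rng(0)
    x = rng.uniform(-5, 5, 200)
    y = rng.uniform(-5, 5, 200)
    z = 2.0 + 0.5 * x - 0.3 * y + rng.normal(0, 0.05, 200)

    coeffs = plane_fit(x, y, z)                  # [offset, grad_x, grad_y]
    robust_coeffs = plane_fit(x, y, z, robust=True)  # soft_l1 loss, resistant to outliers
    print(coeffs, robust_coeffs)

Note that the data matrix is built from median-centred coordinates, so the constant term is the plane height at (median(x), median(y)) rather than at the origin.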
bf694c664649d225006b764af467fe106f59742e
|
tests/test_token.py
|
tests/test_token.py
|
import kindred
def test_token_str():
t = kindred.Token(word="hat",lemma="hat",partofspeech="NN",startPos=0,endPos=3)
assert str(t) == "hat"
def test_token_repr():
t = kindred.Token(word="hat",lemma="hat",partofspeech="NN",startPos=0,endPos=3)
assert t.__repr__() == "hat"
|
Add token test for string functions
|
Add token test for string functions
|
Python
|
mit
|
jakelever/kindred,jakelever/kindred
|
Add token test for string functions
|
import kindred
def test_token_str():
t = kindred.Token(word="hat",lemma="hat",partofspeech="NN",startPos=0,endPos=3)
assert str(t) == "hat"
def test_token_repr():
t = kindred.Token(word="hat",lemma="hat",partofspeech="NN",startPos=0,endPos=3)
assert t.__repr__() == "hat"
|
<commit_before><commit_msg>Add token test for string functions<commit_after>
|
import kindred
def test_token_str():
t = kindred.Token(word="hat",lemma="hat",partofspeech="NN",startPos=0,endPos=3)
assert str(t) == "hat"
def test_token_repr():
t = kindred.Token(word="hat",lemma="hat",partofspeech="NN",startPos=0,endPos=3)
assert t.__repr__() == "hat"
|
Add token test for string functionsimport kindred
def test_token_str():
t = kindred.Token(word="hat",lemma="hat",partofspeech="NN",startPos=0,endPos=3)
assert str(t) == "hat"
def test_token_repr():
t = kindred.Token(word="hat",lemma="hat",partofspeech="NN",startPos=0,endPos=3)
assert t.__repr__() == "hat"
|
<commit_before><commit_msg>Add token test for string functions<commit_after>import kindred
def test_token_str():
t = kindred.Token(word="hat",lemma="hat",partofspeech="NN",startPos=0,endPos=3)
assert str(t) == "hat"
def test_token_repr():
t = kindred.Token(word="hat",lemma="hat",partofspeech="NN",startPos=0,endPos=3)
assert t.__repr__() == "hat"
|
|
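The tests above only exercise the string conversions of kindred.Token; kindred itself is an external package and its real class is not shown. A minimal stand-in that would satisfy both assertions might look like the sketch below, where the attribute names are taken from the constructor call in the test and everything else is assumed.

    class Token:
        def __init__(self, word, lemma, partofspeech, startPos, endPos):
            self.word = word
            self.lemma = lemma
            self.partofspeech = partofspeech
            self.startPos = startPos
            self.endPos = endPos

        def __str__(self):
            # Both str() and repr() are expected to return just the surface form
            return self.word

        def __repr__(self):
            return self.__str__()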
109595a93089a80d4b762e91c2eecad58025d69d
|
utils/tweet_text.py
|
utils/tweet_text.py
|
#!/usr/bin/env python
"""
Given a JSON file, return just the text of the tweet.
Example usage:
utils/tweet_text.py tweets.jsonl > tweets.txt
"""
from __future__ import print_function
import json
import fileinput
for line in fileinput.input():
tweet = json.loads(line)
if 'full_text' in tweet:
print(tweet['full_text'].encode('utf8'))
else:
print(tweet['text'].encode('utf8'))
|
Add util for extracting just the tweet text.
|
Add util for extracting just the tweet text.
|
Python
|
mit
|
hugovk/twarc,remagio/twarc,DocNow/twarc,edsu/twarc,remagio/twarc
|
Add util for extracting just the tweet text.
|
#!/usr/bin/env python
"""
Given a JSON file, return just the text of the tweet.
Example usage:
utils/tweet_text.py tweets.jsonl > tweets.txt
"""
from __future__ import print_function
import json
import fileinput
for line in fileinput.input():
tweet = json.loads(line)
if 'full_text' in tweet:
print(tweet['full_text'].encode('utf8'))
else:
print(tweet['text'].encode('utf8'))
|
<commit_before><commit_msg>Add util for extracting just the tweet text.<commit_after>
|
#!/usr/bin/env python
"""
Given a JSON file, return just the text of the tweet.
Example usage:
utils/tweet_text.py tweets.jsonl > tweets.txt
"""
from __future__ import print_function
import json
import fileinput
for line in fileinput.input():
tweet = json.loads(line)
if 'full_text' in tweet:
print(tweet['full_text'].encode('utf8'))
else:
print(tweet['text'].encode('utf8'))
|
Add util for extracting just the tweet text.#!/usr/bin/env python
"""
Given a JSON file, return just the text of the tweet.
Example usage:
utils/tweet_text.py tweets.jsonl > tweets.txt
"""
from __future__ import print_function
import json
import fileinput
for line in fileinput.input():
tweet = json.loads(line)
if 'full_text' in tweet:
print(tweet['full_text'].encode('utf8'))
else:
print(tweet['text'].encode('utf8'))
|
<commit_before><commit_msg>Add util for extracting just the tweet text.<commit_after>#!/usr/bin/env python
"""
Given a JSON file, return just the text of the tweet.
Example usage:
utils/tweet_text.py tweets.jsonl > tweets.txt
"""
from __future__ import print_function
import json
import fileinput
for line in fileinput.input():
tweet = json.loads(line)
if 'full_text' in tweet:
print(tweet['full_text'].encode('utf8'))
else:
print(tweet['text'].encode('utf8'))
|
|
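For reference, a rough Python 3 variant of the same filter. On Python 3 the .encode('utf8') calls above would print byte literals, so this sketch writes the decoded text directly; the script name in the comment is purely illustrative.

    #!/usr/bin/env python3
    import fileinput
    import json

    # Hypothetical invocation: python3 tweet_text3.py tweets.jsonl > tweets.txt
    for line in fileinput.input():
        tweet = json.loads(line)
        # Extended tweets carry the full body in 'full_text'
        print(tweet.get('full_text', tweet.get('text', '')))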
ff44e924a4f01bd39d4b26a39519bf55dd5e7560
|
ann.py
|
ann.py
|
class ANN:
def __init__(self):
pass
def train(self):
pass
def predict(self):
pass
def update_weights(self):
pass
class Layer:
def __init__(self):
pass
|
Add top down design of ANN and Layer
|
Add top down design of ANN and Layer
|
Python
|
apache-2.0
|
Razvy000/ANN_Course
|
Add top down design of ANN and Layer
|
class ANN:
def __init__(self):
pass
def train(self):
pass
def predict(self):
pass
def update_weights(self):
pass
class Layer:
def __init__(self):
pass
|
<commit_before><commit_msg>Add top down design of ANN and Layer<commit_after>
|
class ANN:
def __init__(self):
pass
def train(self):
pass
def predict(self):
pass
def update_weights(self):
pass
class Layer:
def __init__(self):
pass
|
Add top down design of ANN and Layer
class ANN:
def __init__(self):
pass
def train(self):
pass
def predict(self):
pass
def update_weights(self):
pass
class Layer:
def __init__(self):
pass
|
<commit_before><commit_msg>Add top down design of ANN and Layer<commit_after>
class ANN:
def __init__(self):
pass
def train(self):
pass
def predict(self):
pass
def update_weights(self):
pass
class Layer:
def __init__(self):
pass
|
|
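A hedged sketch of where this top-down design might go next; none of this is in the commit, and the constructor arguments, weight layout, and ReLU activation are assumptions about the intended direction.

    import random

    class Layer:
        def __init__(self, n_inputs, n_neurons):
            # One weight vector (plus a trailing bias term) per neuron
            self.weights = [[random.uniform(-1, 1) for _ in range(n_inputs + 1)]
                            for _ in range(n_neurons)]

        def forward(self, inputs):
            outputs = []
            for w in self.weights:
                activation = w[-1] + sum(wi * xi for wi, xi in zip(w[:-1], inputs))
                outputs.append(max(0.0, activation))  # ReLU, for illustration only
            return outputs

In that picture, ANN.train would loop over examples, call forward through its layers, and delegate to update_weights after each batch.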
37de8d954b9e870590f5bfb9fff3e4ce4e41acca
|
mkt/extensions/migrations/0006_auto_20150914_0745.py
|
mkt/extensions/migrations/0006_auto_20150914_0745.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('extensions', '0005_auto_20150902_0755'),
]
operations = [
migrations.AlterField(
model_name='extension',
name='status',
field=models.PositiveSmallIntegerField(default=0, db_index=True, choices=[(0, 'Incomplete'), (2, 'Pending approval'), (4, 'Published'), (5, 'Obsolete'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved'), (15, 'Blocked'), (16, 'Unlisted')]),
preserve_default=True,
),
migrations.AlterField(
model_name='extensionversion',
name='status',
field=models.PositiveSmallIntegerField(default=0, db_index=True, choices=[(0, 'Incomplete'), (2, 'Pending approval'), (4, 'Published'), (5, 'Obsolete'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved'), (15, 'Blocked'), (16, 'Unlisted')]),
preserve_default=True,
),
]
|
Add missing migration following status choices changes
|
Add missing migration following status choices changes
|
Python
|
bsd-3-clause
|
diox/zamboni,washort/zamboni,elysium001/zamboni,diox/zamboni,ddurst/zamboni,elysium001/zamboni,diox/zamboni,ddurst/zamboni,elysium001/zamboni,elysium001/zamboni,washort/zamboni,mozilla/zamboni,mozilla/zamboni,ingenioustechie/zamboni,jasonthomas/zamboni,diox/zamboni,mozilla/zamboni,ingenioustechie/zamboni,jasonthomas/zamboni,ingenioustechie/zamboni,washort/zamboni,jasonthomas/zamboni,ingenioustechie/zamboni,ddurst/zamboni,mozilla/zamboni,ddurst/zamboni,jasonthomas/zamboni,washort/zamboni
|
Add missing migration following status choices changes
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('extensions', '0005_auto_20150902_0755'),
]
operations = [
migrations.AlterField(
model_name='extension',
name='status',
field=models.PositiveSmallIntegerField(default=0, db_index=True, choices=[(0, 'Incomplete'), (2, 'Pending approval'), (4, 'Published'), (5, 'Obsolete'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved'), (15, 'Blocked'), (16, 'Unlisted')]),
preserve_default=True,
),
migrations.AlterField(
model_name='extensionversion',
name='status',
field=models.PositiveSmallIntegerField(default=0, db_index=True, choices=[(0, 'Incomplete'), (2, 'Pending approval'), (4, 'Published'), (5, 'Obsolete'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved'), (15, 'Blocked'), (16, 'Unlisted')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add missing migration following status choices changes<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('extensions', '0005_auto_20150902_0755'),
]
operations = [
migrations.AlterField(
model_name='extension',
name='status',
field=models.PositiveSmallIntegerField(default=0, db_index=True, choices=[(0, 'Incomplete'), (2, 'Pending approval'), (4, 'Published'), (5, 'Obsolete'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved'), (15, 'Blocked'), (16, 'Unlisted')]),
preserve_default=True,
),
migrations.AlterField(
model_name='extensionversion',
name='status',
field=models.PositiveSmallIntegerField(default=0, db_index=True, choices=[(0, 'Incomplete'), (2, 'Pending approval'), (4, 'Published'), (5, 'Obsolete'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved'), (15, 'Blocked'), (16, 'Unlisted')]),
preserve_default=True,
),
]
|
Add missing migration following status choices changes# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('extensions', '0005_auto_20150902_0755'),
]
operations = [
migrations.AlterField(
model_name='extension',
name='status',
field=models.PositiveSmallIntegerField(default=0, db_index=True, choices=[(0, 'Incomplete'), (2, 'Pending approval'), (4, 'Published'), (5, 'Obsolete'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved'), (15, 'Blocked'), (16, 'Unlisted')]),
preserve_default=True,
),
migrations.AlterField(
model_name='extensionversion',
name='status',
field=models.PositiveSmallIntegerField(default=0, db_index=True, choices=[(0, 'Incomplete'), (2, 'Pending approval'), (4, 'Published'), (5, 'Obsolete'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved'), (15, 'Blocked'), (16, 'Unlisted')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add missing migration following status choices changes<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('extensions', '0005_auto_20150902_0755'),
]
operations = [
migrations.AlterField(
model_name='extension',
name='status',
field=models.PositiveSmallIntegerField(default=0, db_index=True, choices=[(0, 'Incomplete'), (2, 'Pending approval'), (4, 'Published'), (5, 'Obsolete'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved'), (15, 'Blocked'), (16, 'Unlisted')]),
preserve_default=True,
),
migrations.AlterField(
model_name='extensionversion',
name='status',
field=models.PositiveSmallIntegerField(default=0, db_index=True, choices=[(0, 'Incomplete'), (2, 'Pending approval'), (4, 'Published'), (5, 'Obsolete'), (11, 'Deleted'), (12, 'Rejected'), (13, 'Approved'), (15, 'Blocked'), (16, 'Unlisted')]),
preserve_default=True,
),
]
|
|
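For context, a migration like the one above is normally auto-generated after the choices tuple on the model changes. A minimal, hypothetical models.py fragment that would yield these choices might look like this; the constant and class names are illustrative, not mkt's actual ones.

    from django.db import models

    STATUS_CHOICES = (
        (0, 'Incomplete'),
        (2, 'Pending approval'),
        (4, 'Published'),
        (5, 'Obsolete'),
        (11, 'Deleted'),
        (12, 'Rejected'),
        (13, 'Approved'),
        (15, 'Blocked'),
        (16, 'Unlisted'),
    )

    class Extension(models.Model):
        status = models.PositiveSmallIntegerField(
            default=0, db_index=True, choices=STATUS_CHOICES)

Running manage.py makemigrations for the app after such a change regenerates AlterField operations like the ones above.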
9e178a56d84a634a32b0f05d5a3c7436c565418f
|
script2.py
|
script2.py
|
from suds.client import Client
url = 'http://legislatie.just.ro/apiws/FreeWebService.svc?wsdl'
print("==== create client")
client = Client(url)
print(client)
print("==== get token")
token = client.service.GetToken()
print(token)
print("==== do search")
search_model = client.factory.create('SearchModel')
search_model.NumarPagina = 0
search_model.RezultatePagina = 0
search_model.SearchAn = 2014
search_model.SearchTitlu = "medici"
results = client.service.Search(search_model, token)
for law in results.Legi:
print("{0}\n{1}\n\n".format(law.Titlu, law.DataVigoare))
|
Add example for searching in titles
|
Add example for searching in titles
|
Python
|
mit
|
govro/legislatie-just-python-soap-client,govro/legislatie-just-python-soap-client
|
Add example for searching in titles
|
from suds.client import Client
url = 'http://legislatie.just.ro/apiws/FreeWebService.svc?wsdl'
print("==== create client")
client = Client(url)
print(client)
print("==== get token")
token = client.service.GetToken()
print(token)
print("==== do search")
search_model = client.factory.create('SearchModel')
search_model.NumarPagina = 0
search_model.RezultatePagina = 0
search_model.SearchAn = 2014
search_model.SearchTitlu = "medici"
results = client.service.Search(search_model, token)
for law in results.Legi:
print("{0}\n{1}\n\n".format(law.Titlu, law.DataVigoare))
|
<commit_before><commit_msg>Add example for searching in titles<commit_after>
|
from suds.client import Client
url = 'http://legislatie.just.ro/apiws/FreeWebService.svc?wsdl'
print("==== create client")
client = Client(url)
print(client)
print("==== get token")
token = client.service.GetToken()
print(token)
print("==== do search")
search_model = client.factory.create('SearchModel')
search_model.NumarPagina = 0
search_model.RezultatePagina = 0
search_model.SearchAn = 2014
search_model.SearchTitlu = "medici"
results = client.service.Search(search_model, token)
for law in results.Legi:
print("{0}\n{1}\n\n".format(law.Titlu, law.DataVigoare))
|
Add example for searching in titlesfrom suds.client import Client
url = 'http://legislatie.just.ro/apiws/FreeWebService.svc?wsdl'
print("==== create client")
client = Client(url)
print(client)
print("==== get token")
token = client.service.GetToken()
print(token)
print("==== do search")
search_model = client.factory.create('SearchModel')
search_model.NumarPagina = 0
search_model.RezultatePagina = 0
search_model.SearchAn = 2014
search_model.SearchTitlu = "medici"
results = client.service.Search(search_model, token)
for law in results.Legi:
print("{0}\n{1}\n\n".format(law.Titlu, law.DataVigoare))
|
<commit_before><commit_msg>Add example for searching in titles<commit_after>from suds.client import Client
url = 'http://legislatie.just.ro/apiws/FreeWebService.svc?wsdl'
print("==== create client")
client = Client(url)
print(client)
print("==== get token")
token = client.service.GetToken()
print(token)
print("==== do search")
search_model = client.factory.create('SearchModel')
search_model.NumarPagina = 0
search_model.RezultatePagina = 0
search_model.SearchAn = 2014
search_model.SearchTitlu = "medici"
results = client.service.Search(search_model, token)
for law in results.Legi:
print("{0}\n{1}\n\n".format(law.Titlu, law.DataVigoare))
|
|
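A small follow-on sketch showing how the same SearchModel could be paged. The page size of 10 is arbitrary, and the field semantics (NumarPagina as page index, RezultatePagina as results per page) are inferred from the names rather than from the service documentation.

    # Hypothetical pagination over the same search
    search_model.RezultatePagina = 10
    page = 0
    while True:
        search_model.NumarPagina = page
        results = client.service.Search(search_model, token)
        legi = getattr(results, 'Legi', [])
        if not legi:
            break
        for law in legi:
            print("{0} ({1})".format(law.Titlu, law.DataVigoare))
        page += 1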
c80f8e1c1d53f1a19b654f17361d40720b5a90fe
|
app/core/netutils.py
|
app/core/netutils.py
|
import logging
import re
import socket
from subprocess import Popen, PIPE
logger = logging.getLogger(__name__)
def get_mac_address(host):
""" Returns MAC address for a hostname. """
mac_pattern = '(([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2})'
try:
host = socket.gethostbyname(host)
except socket.error:
pass
proc = Popen(["arp", "-a", host], stdout=PIPE)
for line in proc.stdout:
if host in line:
matches = re.findall(mac_pattern, line)
if matches:
return matches[0][0]
return None
|
Add utility to get mac address from host.
|
Add utility to get mac address from host.
|
Python
|
mit
|
supersaiyanmode/HomePiServer,supersaiyanmode/HomePiServer,supersaiyanmode/HomePiServer
|
Add utility to get mac address from host.
|
import logging
import re
import socket
from subprocess import Popen, PIPE
logger = logging.getLogger(__name__)
def get_mac_address(host):
""" Returns MAC address for a hostname. """
mac_pattern = '(([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2})'
try:
host = socket.gethostbyname(host)
except socket.error:
pass
proc = Popen(["arp", "-a", host], stdout=PIPE)
for line in proc.stdout:
if host in line:
matches = re.findall(mac_pattern, line)
if matches:
return matches[0][0]
return None
|
<commit_before><commit_msg>Add utility to get mac address from host.<commit_after>
|
import logging
import re
import socket
from subprocess import Popen, PIPE
logger = logging.getLogger(__name__)
def get_mac_address(host):
""" Returns MAC address for a hostname. """
mac_pattern = '(([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2})'
try:
host = socket.gethostbyname(host)
except socket.error:
pass
proc = Popen(["arp", "-a", host], stdout=PIPE)
for line in proc.stdout:
if host in line:
matches = re.findall(mac_pattern, line)
if matches:
return matches[0][0]
return None
|
Add utility to get mac address from host.import logging
import re
import socket
from subprocess import Popen, PIPE
logger = logging.getLogger(__name__)
def get_mac_address(host):
""" Returns MAC address for a hostname. """
mac_pattern = '(([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2})'
try:
host = socket.gethostbyname(host)
except socket.error:
pass
proc = Popen(["arp", "-a", host], stdout=PIPE)
for line in proc.stdout:
if host in line:
matches = re.findall(mac_pattern, line)
if matches:
return matches[0][0]
return None
|
<commit_before><commit_msg>Add utility to get mac address from host.<commit_after>import logging
import re
import socket
from subprocess import Popen, PIPE
logger = logging.getLogger(__name__)
def get_mac_address(host):
""" Returns MAC address for a hostname. """
mac_pattern = '(([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2})'
try:
host = socket.gethostbyname(host)
except socket.error:
pass
proc = Popen(["arp", "-a", host], stdout=PIPE)
for line in proc.stdout:
if host in line:
matches = re.findall(mac_pattern, line)
if matches:
return matches[0][0]
return None
|
|
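A typical call site for the helper above, with an invented hostname. One caveat: under Python 3, Popen yields bytes lines by default, so the `host in line` check would need universal_newlines=True (or text=True) on the Popen call.

    # Hypothetical usage, reusing the module-level logger
    mac = get_mac_address("printer.local")
    if mac is None:
        logger.warning("No ARP entry found for host")
    else:
        logger.info("Resolved MAC address: %s", mac)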
b7d95fd7ae0515b15c14e6f01f3c22645953afe7
|
example/add_comments.py
|
example/add_comments.py
|
import bugsy
bz = bugsy.Bugsy("username", "password", "https://bugzilla-dev.allizom.org/rest")
bug = bugsy.Bug()
bug.summary = "I love cheese"
bug.add_comment('I do love sausages too')
bz.put(bug)
bug.add_comment('I do love eggs too')
|
Add example for adding comments that runs against a real server
|
Add example for adding comments that runs against a real server
|
Python
|
apache-2.0
|
AutomatedTester/Bugsy,indygreg/Bugsy,parkouss/Bugsy
|
Add example for adding comments that runs against a real server
|
import bugsy
bz = bugsy.Bugsy("username", "password", "https://bugzilla-dev.allizom.org/rest")
bug = bugsy.Bug()
bug.summary = "I love cheese"
bug.add_comment('I do love sausages too')
bz.put(bug)
bug.add_comment('I do love eggs too')
|
<commit_before><commit_msg>Add example for adding comments that runs against a real server<commit_after>
|
import bugsy
bz = bugsy.Bugsy("username", "password", "https://bugzilla-dev.allizom.org/rest")
bug = bugsy.Bug()
bug.summary = "I love cheese"
bug.add_comment('I do love sausages too')
bz.put(bug)
bug.add_comment('I do love eggs too')
|
Add example for adding comments that runs against a real serverimport bugsy
bz = bugsy.Bugsy("username", "password", "https://bugzilla-dev.allizom.org/rest")
bug = bugsy.Bug()
bug.summary = "I love cheese"
bug.add_comment('I do love sausages too')
bz.put(bug)
bug.add_comment('I do love eggs too')
|
<commit_before><commit_msg>Add example for adding comments that runs against a real server<commit_after>import bugsy
bz = bugsy.Bugsy("username", "password", "https://bugzilla-dev.allizom.org/rest")
bug = bugsy.Bug()
bug.summary = "I love cheese"
bug.add_comment('I do love sausages too')
bz.put(bug)
bug.add_comment('I do love eggs too')
|
|
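The same flow with the credentials pulled from the environment instead of hard-coded. Only the calls already shown in the example above are used; the environment variable names are illustrative.

    import os
    import bugsy

    bz = bugsy.Bugsy(os.environ.get("BZ_USER"),
                     os.environ.get("BZ_PASSWORD"),
                     "https://bugzilla-dev.allizom.org/rest")

    bug = bugsy.Bug()
    bug.summary = "I love cheese"
    bug.add_comment('I do love sausages too')  # added before the bug is created
    bz.put(bug)
    bug.add_comment('I do love eggs too')      # added once the bug exists on the server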
4a1577d1a31d30fa006d4030114c9e2790056127
|
nagios-plugins/check_ip_pool.py
|
nagios-plugins/check_ip_pool.py
|
#!/usr/bin/env python
"""
Check for remaining IP addresses
"""
# pylint: disable=import-error
from neutronclient.v2_0 import client
from ipaddress import ip_network
import sys
import argparse
NAGIOS_OK = 0
NAGIOS_WARNING = 1
NAGIOS_CRITICAL = 2
NAGIOS_UNKNOWN = 3
def main():
"""
Main script body
"""
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--username', required=True)
parser.add_argument('-p', '--password', required=True)
parser.add_argument('-t', '--tenant_name', required=True)
parser.add_argument('-a', '--auth_url', required=True)
parser.add_argument('-w', '--warn', type=int, required=True)
parser.add_argument('-c', '--critical', type=int, required=True)
args = parser.parse_args()
neutron = client.Client(username=args.username, password=args.password,
tenant_name=args.tenant_name,
auth_url=args.auth_url)
neutron.format = 'json'
for arg in [args.warn, args.critical]:
if not 0 <= arg <= 100:
print "Alert parameters must be valid percentages"
sys.exit(NAGIOS_UNKNOWN)
# Get external network
# Assume a single external network for the minute
ext_net = [net for net in neutron.list_networks()['networks']
if net['router:external']]
total_addresses = 0
for subnet in neutron.show_network(ext_net[0]['id'])['network']['subnets']:
total_addresses += ip_network(neutron.show_subnet(subnet)
['subnet']['cidr']).num_addresses
floating_ips = len(neutron.list_floatingips()['floatingips'])
router_ips = len([router for router in neutron.list_routers()['routers']
if router['external_gateway_info']])
total_used = floating_ips + router_ips
percentage_used = 100 * total_used/total_addresses
if percentage_used >= args.warn:
code = NAGIOS_WARNING
msg = 'WARNING'
elif percentage_used >= args.critical:
code = NAGIOS_CRITICAL
msg = 'CRITICAL'
else:
code = NAGIOS_OK
msg = 'OK'
print '{0}: {1}% of IP pool used, '\
'{2} out of {5} addresses in use | '\
'total_used={2};{3};{4};;{5} '\
'total_available={5} '\
'floating_ips_used={6} '\
'ext_routers_used={7}'\
.format(msg, percentage_used, total_used,
(total_addresses * args.warn)/100,
(total_addresses * args.critical)/100,
total_addresses, floating_ips, router_ips)
sys.exit(code)
if __name__ == "__main__":
main()
|
Add nagios check for remaining IP addresses
|
Add nagios check for remaining IP addresses
Check currently assumes a single external network and checks for routers
with allocated IP's and floating IP's allocated
Change-Id: I6835de8b036ed5247994ebd904f63147dfef3d67
|
Python
|
apache-2.0
|
openstack/osops-tools-monitoring,openstack/osops-tools-monitoring,openstack/osops-tools-monitoring
|
Add nagios check for remaining IP addresses
Check currently assumes a single external network and checks for routers
with allocated IP's and floating IP's allocated
Change-Id: I6835de8b036ed5247994ebd904f63147dfef3d67
|
#!/usr/bin/env python
"""
Check for remaining IP addresses
"""
# pylint: disable=import-error
from neutronclient.v2_0 import client
from ipaddress import ip_network
import sys
import argparse
NAGIOS_OK = 0
NAGIOS_WARNING = 1
NAGIOS_CRITICAL = 2
NAGIOS_UNKNOWN = 3
def main():
"""
Main script body
"""
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--username', required=True)
parser.add_argument('-p', '--password', required=True)
parser.add_argument('-t', '--tenant_name', required=True)
parser.add_argument('-a', '--auth_url', required=True)
parser.add_argument('-w', '--warn', type=int, required=True)
parser.add_argument('-c', '--critical', type=int, required=True)
args = parser.parse_args()
neutron = client.Client(username=args.username, password=args.password,
tenant_name=args.tenant_name,
auth_url=args.auth_url)
neutron.format = 'json'
for arg in [args.warn, args.critical]:
if not 0 <= arg <= 100:
print "Alert parameters must be valid percentages"
sys.exit(NAGIOS_UNKNOWN)
# Get external network
# Assume a single external network for the minute
ext_net = [net for net in neutron.list_networks()['networks']
if net['router:external']]
total_addresses = 0
for subnet in neutron.show_network(ext_net[0]['id'])['network']['subnets']:
total_addresses += ip_network(neutron.show_subnet(subnet)
['subnet']['cidr']).num_addresses
floating_ips = len(neutron.list_floatingips()['floatingips'])
router_ips = len([router for router in neutron.list_routers()['routers']
if router['external_gateway_info']])
total_used = floating_ips + router_ips
percentage_used = 100 * total_used/total_addresses
if percentage_used >= args.warn:
code = NAGIOS_WARNING
msg = 'WARNING'
elif percentage_used >= args.critical:
code = NAGIOS_CRITICAL
msg = 'CRITICAL'
else:
code = NAGIOS_OK
msg = 'OK'
print '{0}: {1}% of IP pool used, '\
'{2} out of {5} addresses in use | '\
'total_used={2};{3};{4};;{5} '\
'total_available={5} '\
'floating_ips_used={6} '\
'ext_routers_used={7}'\
.format(msg, percentage_used, total_used,
(total_addresses * args.warn)/100,
(total_addresses * args.critical)/100,
total_addresses, floating_ips, router_ips)
sys.exit(code)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add nagios check for remaining IP addresses
Check currently assumes a single external network and checks for routers
with allocated IP's and floating IP's allocated
Change-Id: I6835de8b036ed5247994ebd904f63147dfef3d67<commit_after>
|
#!/usr/bin/env python
"""
Check for remaining IP addresses
"""
# pylint: disable=import-error
from neutronclient.v2_0 import client
from ipaddress import ip_network
import sys
import argparse
NAGIOS_OK = 0
NAGIOS_WARNING = 1
NAGIOS_CRITICAL = 2
NAGIOS_UNKNOWN = 3
def main():
"""
Main script body
"""
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--username', required=True)
parser.add_argument('-p', '--password', required=True)
parser.add_argument('-t', '--tenant_name', required=True)
parser.add_argument('-a', '--auth_url', required=True)
parser.add_argument('-w', '--warn', type=int, required=True)
parser.add_argument('-c', '--critical', type=int, required=True)
args = parser.parse_args()
neutron = client.Client(username=args.username, password=args.password,
tenant_name=args.tenant_name,
auth_url=args.auth_url)
neutron.format = 'json'
for arg in [args.warn, args.critical]:
if not 0 <= arg <= 100:
print "Alert parameters must be valid percentages"
sys.exit(NAGIOS_UNKNOWN)
# Get external network
# Assume a single external network for the minute
ext_net = [net for net in neutron.list_networks()['networks']
if net['router:external']]
total_addresses = 0
for subnet in neutron.show_network(ext_net[0]['id'])['network']['subnets']:
total_addresses += ip_network(neutron.show_subnet(subnet)
['subnet']['cidr']).num_addresses
floating_ips = len(neutron.list_floatingips()['floatingips'])
router_ips = len([router for router in neutron.list_routers()['routers']
if router['external_gateway_info']])
total_used = floating_ips + router_ips
percentage_used = 100 * total_used/total_addresses
if percentage_used >= args.warn:
code = NAGIOS_WARNING
msg = 'WARNING'
elif percentage_used >= args.critical:
code = NAGIOS_CRITICAL
msg = 'CRITICAL'
else:
code = NAGIOS_OK
msg = 'OK'
print '{0}: {1}% of IP pool used, '\
'{2} out of {5} addresses in use | '\
'total_used={2};{3};{4};;{5} '\
'total_available={5} '\
'floating_ips_used={6} '\
'ext_routers_used={7}'\
.format(msg, percentage_used, total_used,
(total_addresses * args.warn)/100,
(total_addresses * args.critical)/100,
total_addresses, floating_ips, router_ips)
sys.exit(code)
if __name__ == "__main__":
main()
|
Add nagios check for remaining IP addresses
Check currently assumes a single external network and checks for routers
with allocated IP's and floating IP's allocated
Change-Id: I6835de8b036ed5247994ebd904f63147dfef3d67#!/usr/bin/env python
"""
Check for remaining IP addresses
"""
# pylint: disable=import-error
from neutronclient.v2_0 import client
from ipaddress import ip_network
import sys
import argparse
NAGIOS_OK = 0
NAGIOS_WARNING = 1
NAGIOS_CRITICAL = 2
NAGIOS_UNKNOWN = 3
def main():
"""
Main script body
"""
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--username', required=True)
parser.add_argument('-p', '--password', required=True)
parser.add_argument('-t', '--tenant_name', required=True)
parser.add_argument('-a', '--auth_url', required=True)
parser.add_argument('-w', '--warn', type=int, required=True)
parser.add_argument('-c', '--critical', type=int, required=True)
args = parser.parse_args()
neutron = client.Client(username=args.username, password=args.password,
tenant_name=args.tenant_name,
auth_url=args.auth_url)
neutron.format = 'json'
for arg in [args.warn, args.critical]:
if not 0 <= arg <= 100:
print "Alert parameters must be valid percentages"
sys.exit(NAGIOS_UNKNOWN)
# Get external network
# Assume a single external network for the minute
ext_net = [net for net in neutron.list_networks()['networks']
if net['router:external']]
total_addresses = 0
for subnet in neutron.show_network(ext_net[0]['id'])['network']['subnets']:
total_addresses += ip_network(neutron.show_subnet(subnet)
['subnet']['cidr']).num_addresses
floating_ips = len(neutron.list_floatingips()['floatingips'])
router_ips = len([router for router in neutron.list_routers()['routers']
if router['external_gateway_info']])
total_used = floating_ips + router_ips
percentage_used = 100 * total_used/total_addresses
if percentage_used >= args.warn:
code = NAGIOS_WARNING
msg = 'WARNING'
elif percentage_used >= args.critical:
code = NAGIOS_CRITICAL
msg = 'CRITICAL'
else:
code = NAGIOS_OK
msg = 'OK'
print '{0}: {1}% of IP pool used, '\
'{2} out of {5} addresses in use | '\
'total_used={2};{3};{4};;{5} '\
'total_available={5} '\
'floating_ips_used={6} '\
'ext_routers_used={7}'\
.format(msg, percentage_used, total_used,
(total_addresses * args.warn)/100,
(total_addresses * args.critical)/100,
total_addresses, floating_ips, router_ips)
sys.exit(code)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add nagios check for remaining IP addresses
Check currently assumes a single external network and checks for routers
with allocated IP's and floating IP's allocated
Change-Id: I6835de8b036ed5247994ebd904f63147dfef3d67<commit_after>#!/usr/bin/env python
"""
Check for remaining IP addresses
"""
# pylint: disable=import-error
from neutronclient.v2_0 import client
from ipaddress import ip_network
import sys
import argparse
NAGIOS_OK = 0
NAGIOS_WARNING = 1
NAGIOS_CRITICAL = 2
NAGIOS_UNKNOWN = 3
def main():
"""
Main script body
"""
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--username', required=True)
parser.add_argument('-p', '--password', required=True)
parser.add_argument('-t', '--tenant_name', required=True)
parser.add_argument('-a', '--auth_url', required=True)
parser.add_argument('-w', '--warn', type=int, required=True)
parser.add_argument('-c', '--critical', type=int, required=True)
args = parser.parse_args()
neutron = client.Client(username=args.username, password=args.password,
tenant_name=args.tenant_name,
auth_url=args.auth_url)
neutron.format = 'json'
for arg in [args.warn, args.critical]:
if not 0 <= arg <= 100:
print "Alert parameters must be valid percentages"
sys.exit(NAGIOS_UNKNOWN)
# Get external network
# Assume a single external network for the minute
ext_net = [net for net in neutron.list_networks()['networks']
if net['router:external']]
total_addresses = 0
for subnet in neutron.show_network(ext_net[0]['id'])['network']['subnets']:
total_addresses += ip_network(neutron.show_subnet(subnet)
['subnet']['cidr']).num_addresses
floating_ips = len(neutron.list_floatingips()['floatingips'])
router_ips = len([router for router in neutron.list_routers()['routers']
if router['external_gateway_info']])
total_used = floating_ips + router_ips
percentage_used = 100 * total_used/total_addresses
if percentage_used >= args.warn:
code = NAGIOS_WARNING
msg = 'WARNING'
elif percentage_used >= args.critical:
code = NAGIOS_CRITICAL
msg = 'CRITICAL'
else:
code = NAGIOS_OK
msg = 'OK'
print '{0}: {1}% of IP pool used, '\
'{2} out of {5} addresses in use | '\
'total_used={2};{3};{4};;{5} '\
'total_available={5} '\
'floating_ips_used={6} '\
'ext_routers_used={7}'\
.format(msg, percentage_used, total_used,
(total_addresses * args.warn)/100,
(total_addresses * args.critical)/100,
total_addresses, floating_ips, router_ips)
sys.exit(code)
if __name__ == "__main__":
main()
|
|
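A worked example of the threshold arithmetic the plugin performs, with made-up numbers. Two things are worth seeing in miniature: the percentage is truncated by integer division under Python 2, and because the warn comparison runs first, a value that also exceeds the critical threshold is still reported as WARNING.

    # Invented figures for a hypothetical /22 external network
    total_addresses = 1024
    floating_ips = 240
    router_ips = 60
    total_used = floating_ips + router_ips              # 300
    percentage_used = 100 * total_used / total_addresses  # 29 on Python 2 (truncated)

    warn, critical = 80, 90
    # Mirrors the plugin's branch order
    if percentage_used >= warn:
        print('WARNING')
    elif percentage_used >= critical:
        print('CRITICAL')
    else:
        print('OK')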
43d1f8f1d5ef6a7452d6bad7e10d5be1f688c0b9
|
app/knn_prediction.py
|
app/knn_prediction.py
|
#!/usr/bin/env python
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import validate_dataset as validate
# Make predictions using K-Nearest Neighbors
knn = KNeighborsClassifier()
# load train data from validate
knn.fit(validate.X_train, validate.Y_train)
# get predictions and registers from validation
predictions = knn.predict(validate.X_validation)
registers = validate.Y_validation
if __name__ == '__main__':
# accuracy score 0.9 is equal 90%
print(accuracy_score(registers, predictions))
# matrix with 3 errors made
print(confusion_matrix(registers, predictions))
# classification table (precision, recall, f1-score, support)
print(classification_report(registers, predictions))
|
Add prediction from K-Nearest Neighbors
|
Add prediction from K-Nearest Neighbors
|
Python
|
mit
|
lucasb/iris-machine-learning
|
Add prediction from K-Nearest Neighbors
|
#!/usr/bin/env python
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import validate_dataset as validate
# Make predictions using K-Nearest Neighbors
knn = KNeighborsClassifier()
# load train data from validate
knn.fit(validate.X_train, validate.Y_train)
# get predictions and registers from validation
predictions = knn.predict(validate.X_validation)
registers = validate.Y_validation
if __name__ == '__main__':
# accuracy score 0.9 is equal 90%
print(accuracy_score(registers, predictions))
# matrix with 3 errors made
print(confusion_matrix(registers, predictions))
# classification table (precision, recall, f1-score, support)
print(classification_report(registers, predictions))
|
<commit_before><commit_msg>Add prediction from K-Nearest Neighbors<commit_after>
|
#!/usr/bin/env python
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import validate_dataset as validate
# Make predictions using K-Nearest Neighbors
knn = KNeighborsClassifier()
# load train data from validate
knn.fit(validate.X_train, validate.Y_train)
# get predictions and registers from validation
predictions = knn.predict(validate.X_validation)
registers = validate.Y_validation
if __name__ == '__main__':
# accuracy score 0.9 is equal 90%
print(accuracy_score(registers, predictions))
# matrix with 3 errors made
print(confusion_matrix(registers, predictions))
# classification table (precision, recall, f1-score, support)
print(classification_report(registers, predictions))
|
Add prediction from K-Nearest Neighbors#!/usr/bin/env python
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import validate_dataset as validate
# Make predictions using K-Nearest Neighbors
knn = KNeighborsClassifier()
# load train data from validate
knn.fit(validate.X_train, validate.Y_train)
# get predictions and registers from validation
predictions = knn.predict(validate.X_validation)
registers = validate.Y_validation
if __name__ == '__main__':
# accuracy score 0.9 is equal 90%
print(accuracy_score(registers, predictions))
# matrix with 3 errors made
print(confusion_matrix(registers, predictions))
# classification table (precision, recall, f1-score, support)
print(classification_report(registers, predictions))
|
<commit_before><commit_msg>Add prediction from K-Nearest Neighbors<commit_after>#!/usr/bin/env python
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import validate_dataset as validate
# Make predictions using K-Nearest Neighbors
knn = KNeighborsClassifier()
# load train data from validate
knn.fit(validate.X_train, validate.Y_train)
# get predictions and registers from validation
predictions = knn.predict(validate.X_validation)
registers = validate.Y_validation
if __name__ == '__main__':
# accuracy score 0.9 is equal 90%
print(accuracy_score(registers, predictions))
# matrix with 3 errors made
print(confusion_matrix(registers, predictions))
# classification table (precision, recall, f1-score, support)
print(classification_report(registers, predictions))
|
|
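The script above imports X_train, Y_train, X_validation and Y_validation from a local validate_dataset module that is not shown. A minimal, hypothetical version of that module for the iris dataset might look like this; the split ratio and random seed are arbitrary.

    # Hypothetical validate_dataset.py
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split

    iris = load_iris()
    X_train, X_validation, Y_train, Y_validation = train_test_split(
        iris.data, iris.target, test_size=0.2, random_state=7)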
a121281532dc3c9da6534684da0eae0ab57b000b
|
nose2/tests/unit/test_config.py
|
nose2/tests/unit/test_config.py
|
from nose2 import config
from nose2.compat import unittest
class TestConfigSession(unittest.TestCase):
def test_can_create_session(self):
config.Session()
class TestConfig(unittest.TestCase):
def setUp(self):
self.conf = config.Config([('a', ' 1 '), ('b', ' x\n y ')])
def test_as_int(self):
self.assertEqual(self.conf.as_int('a'), 1)
def test_as_str(self):
self.assertEqual(self.conf.as_str('a'), '1')
self.assertEqual(self.conf.as_str('b'), 'x\n y')
self.assertEqual(self.conf.as_str('missing', 'default'), 'default')
def test_as_bool(self):
self.assertEqual(self.conf.as_bool('a'), True)
def test_as_float(self):
self.assertAlmostEqual(self.conf.as_float('a'), 1.0)
def test_as_list(self):
self.assertEqual(self.conf.as_list('b'), ['x', 'y'])
|
from nose2 import config
from nose2.compat import unittest
class TestConfigSession(unittest.TestCase):
def test_can_create_session(self):
config.Session()
class TestConfig(unittest.TestCase):
def setUp(self):
self.conf = config.Config([
('a', ' 1 '), ('b', ' x\n y '), ('c', '0')])
def test_as_int(self):
self.assertEqual(self.conf.as_int('a'), 1)
def test_as_str(self):
self.assertEqual(self.conf.as_str('a'), '1')
self.assertEqual(self.conf.as_str('b'), 'x\n y')
self.assertEqual(self.conf.as_str('missing', 'default'), 'default')
def test_as_bool(self):
self.assertEqual(self.conf.as_bool('a'), True)
self.assertEqual(self.conf.as_bool('c'), False)
def test_as_float(self):
self.assertAlmostEqual(self.conf.as_float('a'), 1.0)
def test_as_list(self):
self.assertEqual(self.conf.as_list('b'), ['x', 'y'])
|
Add test for as_bool bug
|
Add test for as_bool bug
|
Python
|
bsd-2-clause
|
little-dude/nose2,ojengwa/nose2,ezigman/nose2,ptthiem/nose2,ptthiem/nose2,ezigman/nose2,little-dude/nose2,leth/nose2,ojengwa/nose2,leth/nose2
|
from nose2 import config
from nose2.compat import unittest
class TestConfigSession(unittest.TestCase):
def test_can_create_session(self):
config.Session()
class TestConfig(unittest.TestCase):
def setUp(self):
self.conf = config.Config([('a', ' 1 '), ('b', ' x\n y ')])
def test_as_int(self):
self.assertEqual(self.conf.as_int('a'), 1)
def test_as_str(self):
self.assertEqual(self.conf.as_str('a'), '1')
self.assertEqual(self.conf.as_str('b'), 'x\n y')
self.assertEqual(self.conf.as_str('missing', 'default'), 'default')
def test_as_bool(self):
self.assertEqual(self.conf.as_bool('a'), True)
def test_as_float(self):
self.assertAlmostEqual(self.conf.as_float('a'), 1.0)
def test_as_list(self):
self.assertEqual(self.conf.as_list('b'), ['x', 'y'])
Add test for as_bool bug
|
from nose2 import config
from nose2.compat import unittest
class TestConfigSession(unittest.TestCase):
def test_can_create_session(self):
config.Session()
class TestConfig(unittest.TestCase):
def setUp(self):
self.conf = config.Config([
('a', ' 1 '), ('b', ' x\n y '), ('c', '0')])
def test_as_int(self):
self.assertEqual(self.conf.as_int('a'), 1)
def test_as_str(self):
self.assertEqual(self.conf.as_str('a'), '1')
self.assertEqual(self.conf.as_str('b'), 'x\n y')
self.assertEqual(self.conf.as_str('missing', 'default'), 'default')
def test_as_bool(self):
self.assertEqual(self.conf.as_bool('a'), True)
self.assertEqual(self.conf.as_bool('c'), False)
def test_as_float(self):
self.assertAlmostEqual(self.conf.as_float('a'), 1.0)
def test_as_list(self):
self.assertEqual(self.conf.as_list('b'), ['x', 'y'])
|
<commit_before>from nose2 import config
from nose2.compat import unittest
class TestConfigSession(unittest.TestCase):
def test_can_create_session(self):
config.Session()
class TestConfig(unittest.TestCase):
def setUp(self):
self.conf = config.Config([('a', ' 1 '), ('b', ' x\n y ')])
def test_as_int(self):
self.assertEqual(self.conf.as_int('a'), 1)
def test_as_str(self):
self.assertEqual(self.conf.as_str('a'), '1')
self.assertEqual(self.conf.as_str('b'), 'x\n y')
self.assertEqual(self.conf.as_str('missing', 'default'), 'default')
def test_as_bool(self):
self.assertEqual(self.conf.as_bool('a'), True)
def test_as_float(self):
self.assertAlmostEqual(self.conf.as_float('a'), 1.0)
def test_as_list(self):
self.assertEqual(self.conf.as_list('b'), ['x', 'y'])
<commit_msg>Add test for as_bool bug<commit_after>
|
from nose2 import config
from nose2.compat import unittest
class TestConfigSession(unittest.TestCase):
def test_can_create_session(self):
config.Session()
class TestConfig(unittest.TestCase):
def setUp(self):
self.conf = config.Config([
('a', ' 1 '), ('b', ' x\n y '), ('c', '0')])
def test_as_int(self):
self.assertEqual(self.conf.as_int('a'), 1)
def test_as_str(self):
self.assertEqual(self.conf.as_str('a'), '1')
self.assertEqual(self.conf.as_str('b'), 'x\n y')
self.assertEqual(self.conf.as_str('missing', 'default'), 'default')
def test_as_bool(self):
self.assertEqual(self.conf.as_bool('a'), True)
self.assertEqual(self.conf.as_bool('c'), False)
def test_as_float(self):
self.assertAlmostEqual(self.conf.as_float('a'), 1.0)
def test_as_list(self):
self.assertEqual(self.conf.as_list('b'), ['x', 'y'])
|
from nose2 import config
from nose2.compat import unittest
class TestConfigSession(unittest.TestCase):
def test_can_create_session(self):
config.Session()
class TestConfig(unittest.TestCase):
def setUp(self):
self.conf = config.Config([('a', ' 1 '), ('b', ' x\n y ')])
def test_as_int(self):
self.assertEqual(self.conf.as_int('a'), 1)
def test_as_str(self):
self.assertEqual(self.conf.as_str('a'), '1')
self.assertEqual(self.conf.as_str('b'), 'x\n y')
self.assertEqual(self.conf.as_str('missing', 'default'), 'default')
def test_as_bool(self):
self.assertEqual(self.conf.as_bool('a'), True)
def test_as_float(self):
self.assertAlmostEqual(self.conf.as_float('a'), 1.0)
def test_as_list(self):
self.assertEqual(self.conf.as_list('b'), ['x', 'y'])
Add test for as_bool bugfrom nose2 import config
from nose2.compat import unittest
class TestConfigSession(unittest.TestCase):
def test_can_create_session(self):
config.Session()
class TestConfig(unittest.TestCase):
def setUp(self):
self.conf = config.Config([
('a', ' 1 '), ('b', ' x\n y '), ('c', '0')])
def test_as_int(self):
self.assertEqual(self.conf.as_int('a'), 1)
def test_as_str(self):
self.assertEqual(self.conf.as_str('a'), '1')
self.assertEqual(self.conf.as_str('b'), 'x\n y')
self.assertEqual(self.conf.as_str('missing', 'default'), 'default')
def test_as_bool(self):
self.assertEqual(self.conf.as_bool('a'), True)
self.assertEqual(self.conf.as_bool('c'), False)
def test_as_float(self):
self.assertAlmostEqual(self.conf.as_float('a'), 1.0)
def test_as_list(self):
self.assertEqual(self.conf.as_list('b'), ['x', 'y'])
|
<commit_before>from nose2 import config
from nose2.compat import unittest
class TestConfigSession(unittest.TestCase):
def test_can_create_session(self):
config.Session()
class TestConfig(unittest.TestCase):
def setUp(self):
self.conf = config.Config([('a', ' 1 '), ('b', ' x\n y ')])
def test_as_int(self):
self.assertEqual(self.conf.as_int('a'), 1)
def test_as_str(self):
self.assertEqual(self.conf.as_str('a'), '1')
self.assertEqual(self.conf.as_str('b'), 'x\n y')
self.assertEqual(self.conf.as_str('missing', 'default'), 'default')
def test_as_bool(self):
self.assertEqual(self.conf.as_bool('a'), True)
def test_as_float(self):
self.assertAlmostEqual(self.conf.as_float('a'), 1.0)
def test_as_list(self):
self.assertEqual(self.conf.as_list('b'), ['x', 'y'])
<commit_msg>Add test for as_bool bug<commit_after>from nose2 import config
from nose2.compat import unittest
class TestConfigSession(unittest.TestCase):
def test_can_create_session(self):
config.Session()
class TestConfig(unittest.TestCase):
def setUp(self):
self.conf = config.Config([
('a', ' 1 '), ('b', ' x\n y '), ('c', '0')])
def test_as_int(self):
self.assertEqual(self.conf.as_int('a'), 1)
def test_as_str(self):
self.assertEqual(self.conf.as_str('a'), '1')
self.assertEqual(self.conf.as_str('b'), 'x\n y')
self.assertEqual(self.conf.as_str('missing', 'default'), 'default')
def test_as_bool(self):
self.assertEqual(self.conf.as_bool('a'), True)
self.assertEqual(self.conf.as_bool('c'), False)
def test_as_float(self):
self.assertAlmostEqual(self.conf.as_float('a'), 1.0)
def test_as_list(self):
self.assertEqual(self.conf.as_list('b'), ['x', 'y'])
|
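The new assertion pins down the as_bool bug: a stored '0' must come back as False, not truthy. The real implementation lives in nose2.config and is not shown here; a sketch of the value-level behaviour the test demands could be:

    # Illustrative only, not nose2's actual code
    def as_bool(value, default=False):
        value = value.strip().lower()
        if value in ('1', 'true', 'on', 'yes'):
            return True
        if value in ('0', 'false', 'off', 'no', ''):
            return False
        return default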
d45f71966ffa298245e3577126a5bf299ba1e36a
|
submissions/faeludire/Day_5/count_vowels.py
|
submissions/faeludire/Day_5/count_vowels.py
|
#Day 5: Count Vowels
#This program counts the number of vowels in the entered string.
#For added complexity, it reports a sum of each vowel found in a long text.
inputString = raw_input("Enter the string to be evaluated:")
lowerCaseString = str.lower(inputString)
convertedListString = list(lowerCaseString)
aNum = convertedListString.count('a')
eNum = convertedListString.count('e')
iNum = convertedListString.count('i')
oNum = convertedListString.count('o')
uNum = convertedListString.count('u')
totalVowelsFound = aNum + eNum + iNum + oNum + uNum
print "Total number of vowels are: ", totalVowelsFound
print "Total number of 'a' found = ", aNum
print "Total number of 'e' found = ", eNum
print "Total number of 'i' found = ", iNum
print "Total number of 'o' found = ", oNum
print "Total number of 'u' found = ", uNum
|
Update submission for Day 5
|
Update submission for Day 5
|
Python
|
mit
|
ConsonanceNg/100_Days_of_code_submissions,ConsonanceNg/100_Days_of_code_submissions,ConsonanceNg/100_Days_of_code_submissions,ConsonanceNg/100_Days_of_code_submissions,ConsonanceNg/100_Days_of_code_submissions,ConsonanceNg/100_Days_of_code_submissions,ConsonanceNg/100_Days_of_code_submissions,ConsonanceNg/100_Days_of_code_submissions
|
Update submission for Day 5
|
#Day 5: Count Vowels
#This program counts the number of vowels in the entered string.
#For added complexity, it reports a sum of each vowel found in a long text.
inputString = raw_input("Enter the string to be evaluated:")
lowerCaseString = str.lower(inputString)
convertedListString = list(lowerCaseString)
aNum = convertedListString.count('a')
eNum = convertedListString.count('e')
iNum = convertedListString.count('i')
oNum = convertedListString.count('o')
uNum = convertedListString.count('u')
totalVowelsFound = aNum + eNum + iNum + oNum + uNum
print "Total number of vowels are: ", totalVowelsFound
print "Total number of 'a' found = ", aNum
print "Total number of 'e' found = ", eNum
print "Total number of 'i' found = ", iNum
print "Total number of 'o' found = ", oNum
print "Total number of 'u' found = ", uNum
|
<commit_before><commit_msg>Update submission for Day 5<commit_after>
|
#Day 5: Count Vowels
#This program counts the number of vowels in the entered string.
#For added complexity, it reports a sum of each vowel found in a long text.
inputString = raw_input("Enter the string to be evaluated:")
lowerCaseString = str.lower(inputString)
convertedListString = list(lowerCaseString)
aNum = convertedListString.count('a')
eNum = convertedListString.count('e')
iNum = convertedListString.count('i')
oNum = convertedListString.count('o')
uNum = convertedListString.count('u')
totalVowelsFound = aNum + eNum + iNum + oNum + uNum
print "Total number of vowels are: ", totalVowelsFound
print "Total number of 'a' found = ", aNum
print "Total number of 'e' found = ", eNum
print "Total number of 'i' found = ", iNum
print "Total number of 'o' found = ", oNum
print "Total number of 'u' found = ", uNum
|
Update submission for Day 5#Day 5: Count Vowels
#This program counts the number of vowels in the entered string.
#For added complexity, it reports a sum of each vowel found in a long text.
inputString = raw_input("Enter the string to be evaluated:")
lowerCaseString = str.lower(inputString)
convertedListString = list(lowerCaseString)
aNum = convertedListString.count('a')
eNum = convertedListString.count('e')
iNum = convertedListString.count('i')
oNum = convertedListString.count('o')
uNum = convertedListString.count('u')
totalVowelsFound = aNum + eNum + iNum + oNum + uNum
print "Total number of vowels are: ", totalVowelsFound
print "Total number of 'a' found = ", aNum
print "Total number of 'e' found = ", eNum
print "Total number of 'i' found = ", iNum
print "Total number of 'o' found = ", oNum
print "Total number of 'u' found = ", uNum
|
<commit_before><commit_msg>Update submission for Day 5<commit_after>#Day 5: Count Vowels
#This program counts the number of vowels in the entered string.
#For added complexity, it reports a sum of each vowel found in a long text.
inputString = raw_input("Enter the string to be evaluated:")
lowerCaseString = str.lower(inputString)
convertedListString = list(lowerCaseString)
aNum = convertedListString.count('a')
eNum = convertedListString.count('e')
iNum = convertedListString.count('i')
oNum = convertedListString.count('o')
uNum = convertedListString.count('u')
totalVowelsFound = aNum + eNum + iNum + oNum + uNum
print "Total number of vowels are: ", totalVowelsFound
print "Total number of 'a' found = ", aNum
print "Total number of 'e' found = ", eNum
print "Total number of 'i' found = ", iNum
print "Total number of 'o' found = ", oNum
print "Total number of 'u' found = ", uNum
|
|
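The script above is Python 2 (raw_input and print statements). For comparison, the same tally with collections.Counter under Python 3; the behaviour is unchanged, only the bookkeeping is shorter.

    from collections import Counter

    text = input("Enter the string to be evaluated:").lower()
    counts = Counter(ch for ch in text if ch in "aeiou")
    print("Total number of vowels are:", sum(counts.values()))
    for vowel in "aeiou":
        print("Total number of '%s' found = %d" % (vowel, counts[vowel]))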
6a3eda2781f1ea4ed7106313fdc5b7c40786304c
|
tests/integration/mci/test_backwards_navigation_after_submission.py
|
tests/integration/mci/test_backwards_navigation_after_submission.py
|
from .test_happy_path import TestHappyPath
class TestbackwardsNavigationAfterSubmission(TestHappyPath):
def test_backwards_navigation(self):
self.test_happy_path()
resp = self.client.get('/submission', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
resp = self.client.get('/questionnaire', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
resp = self.client.get('/landing-page', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
|
Add test for navigating backwards, after submission - failing
|
Add test for navigating backwards, after submission - failing
|
Python
|
mit
|
ONSdigital/eq-survey-runner,ONSdigital/eq-survey-runner,ONSdigital/eq-survey-runner,ONSdigital/eq-survey-runner
|
Add test for navigating backwards, after submission - failing
|
from .test_happy_path import TestHappyPath
class TestbackwardsNavigationAfterSubmission(TestHappyPath):
def test_backwards_navigation(self):
self.test_happy_path()
resp = self.client.get('/submission', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
resp = self.client.get('/questionnaire', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
resp = self.client.get('/landing-page', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
|
<commit_before><commit_msg>Add test for navigating backwards, after submission - failing<commit_after>
|
from .test_happy_path import TestHappyPath
class TestbackwardsNavigationAfterSubmission(TestHappyPath):
def test_backwards_navigation(self):
self.test_happy_path()
resp = self.client.get('/submission', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
resp = self.client.get('/questionnaire', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
resp = self.client.get('/landing-page', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
|
Add test for navigating backwards, after submission - failingfrom .test_happy_path import TestHappyPath
class TestbackwardsNavigationAfterSubmission(TestHappyPath):
def test_backwards_navigation(self):
self.test_happy_path()
resp = self.client.get('/submission', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
resp = self.client.get('/questionnaire', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
resp = self.client.get('/landing-page', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
|
<commit_before><commit_msg>Add test for navigating backwards, after submission - failing<commit_after>from .test_happy_path import TestHappyPath
class TestbackwardsNavigationAfterSubmission(TestHappyPath):
def test_backwards_navigation(self):
self.test_happy_path()
resp = self.client.get('/submission', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
resp = self.client.get('/questionnaire', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
resp = self.client.get('/landing-page', follow_redirects=False)
self.assertEquals(resp.status_code, 302)
self.assertRegexpMatches(resp.headers['Location'], '\/thank-you$')
|
|
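The test drives a Flask test client inherited from TestHappyPath and only checks status codes and Location headers. A generic sketch of the server-side behaviour it expects, namely that any guarded route hit after submission bounces to /thank-you, could look like the following; the route names match the test, while the app object and the session flag are assumptions.

    from flask import Flask, redirect, session

    app = Flask(__name__)
    app.secret_key = "illustrative-only"

    @app.route('/questionnaire')
    @app.route('/submission')
    @app.route('/landing-page')
    def guarded():
        # Hypothetical guard: once the questionnaire has been submitted,
        # every later request gets a 302 redirect to the thank-you page.
        if session.get('submitted'):
            return redirect('/thank-you')
        return "page content"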
1b2f9c992982a288d0e7e20b0a1eb96b2fd8a12c
|
channels.py
|
channels.py
|
from django.utils import simplejson
from google.appengine.api import channel, memcache
class Channel():
token = None
address = None
cached = True
message = {}
def __init__(self, address):
self.address = address
self.token = memcache.get("token_%s" % self.address)
if self.token is None:
self.token = channel.create_channel(self.address)
self.cached = False
memcache.set("token_%s" % self.address, self.token, time=7200)
def send(self):
channel.send_message(self.address, simplejson.dumps(self.message))
def queueLink(self, link):
if 'links' not in self.message:
self.message['links'] = []
link_message = {}
link_message['id'] = link.key().id_or_name()
link_message['url'] = link.url
link_message['sender'] = link.sender.address
self.message['links'].push(link_message)
def sendLink(self, link):
self.queueLink(link)
self.send()
|
Split functionality of the Channel API into a separate file and refactored it a bit. Not functional.
|
Split functionality of the Channel API into a separate file and refactored it a bit. Not functional.
|
Python
|
mit
|
2cloud/AppEngine,2cloud/AppEngine
|
Split functionality of the Channel API into a separate file and refactored it a bit. Not functional.
|
from django.utils import simplejson
from google.appengine.api import channel, memcache
class Channel():
token = None
address = None
cached = True
message = {}
def __init__(self, address):
self.address = address
self.token = memcache.get("token_%s" % self.address)
if self.token is None:
self.token = channel.create_channel(self.address)
self.cached = False
memcache.set("token_%s" % self.address, self.token, time=7200)
def send(self):
channel.send_message(self.address, simplejson.dumps(self.message))
def queueLink(self, link):
if 'links' not in self.message:
self.message['links'] = []
link_message = {}
link_message['id'] = link.key().id_or_name()
link_message['url'] = link.url
link_message['sender'] = link.sender.address
self.message['links'].push(link_message)
def sendLink(self, link):
self.queueLink(link)
self.send()
|
<commit_before><commit_msg>Split functionality of the Channel API into a separate file and refactored it a bit. Not functional.<commit_after>
|
from django.utils import simplejson
from google.appengine.api import channel, memcache
class Channel():
token = None
address = None
cached = True
message = {}
def __init__(self, address):
self.address = address
self.token = memcache.get("token_%s" % self.address)
if self.token is None:
self.token = channel.create_channel(self.address)
self.cached = False
memcache.set("token_%s" % self.address, self.token, time=7200)
def send(self):
channel.send_message(self.address, simplejson.dumps(self.message))
def queueLink(self, link):
if 'links' not in self.message:
self.message['links'] = []
link_message = {}
link_message['id'] = link.key().id_or_name()
link_message['url'] = link.url
link_message['sender'] = link.sender.address
self.message['links'].push(link_message)
def sendLink(self, link):
self.queueLink(link)
self.send()
|
Split functionality of the Channel API into a separate file and refactored it a bit. Not functional.from django.utils import simplejson
from google.appengine.api import channel, memcache
class Channel():
token = None
address = None
cached = True
message = {}
def __init__(self, address):
self.address = address
self.token = memcache.get("token_%s" % self.address)
if self.token is None:
self.token = channel.create_channel(self.address)
self.cached = False
memcache.set("token_%s" % self.address, self.token, time=7200)
def send(self):
channel.send_message(self.address, simplejson.dumps(self.message))
def queueLink(self, link):
if 'links' not in self.message:
self.message['links'] = []
link_message = {}
link_message['id'] = link.key().id_or_name()
link_message['url'] = link.url
link_message['sender'] = link.sender.address
self.message['links'].push(link_message)
def sendLink(self, link):
self.queueLink(link)
self.send()
|
<commit_before><commit_msg>Split functionality of the Channel API into a separate file and refactored it a bit. Not functional.<commit_after>from django.utils import simplejson
from google.appengine.api import channel, memcache
class Channel():
token = None
address = None
cached = True
message = {}
def __init__(self, address):
self.address = address
self.token = memcache.get("token_%s" % self.address)
if self.token is None:
self.token = channel.create_channel(self.address)
self.cached = False
memcache.set("token_%s" % self.address, self.token, time=7200)
def send(self):
channel.send_message(self.address, simplejson.dumps(self.message))
def queueLink(self, link):
if 'links' not in self.message:
self.message['links'] = []
link_message = {}
link_message['id'] = link.key().id_or_name()
link_message['url'] = link.url
link_message['sender'] = link.sender.address
self.message['links'].push(link_message)
def sendLink(self, link):
self.queueLink(link)
self.send()
|
|
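Editor's note: the commit above is explicitly flagged as not functional, and two details in queueLink help explain why: Python lists expose append() rather than push(), and the class-level message = {} dictionary is shared by every Channel instance. A hedged sketch addressing those two points, keeping the same App Engine channel and memcache calls as the commit, might look like this; it is an illustration, not the repository's eventual fix.

# Hedged sketch: same structure as the committed Channel class, with message
# made a per-instance attribute and list.push() replaced by list.append().
from django.utils import simplejson
from google.appengine.api import channel, memcache

class Channel(object):
    def __init__(self, address):
        self.address = address
        self.message = {}                      # per-instance, not shared class state
        self.cached = True
        self.token = memcache.get("token_%s" % self.address)
        if self.token is None:
            self.token = channel.create_channel(self.address)
            self.cached = False
            memcache.set("token_%s" % self.address, self.token, time=7200)

    def send(self):
        channel.send_message(self.address, simplejson.dumps(self.message))

    def queueLink(self, link):
        # Python lists have append(), not push()
        self.message.setdefault('links', []).append({
            'id': link.key().id_or_name(),
            'url': link.url,
            'sender': link.sender.address,
        })

    def sendLink(self, link):
        self.queueLink(link)
        self.send()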
292468de015338bde2e7ddbe22a518391ee767c4
|
application/config.py
|
application/config.py
|
import os
class Config(object):
""" This class configures the parameters to be used in a production enviroment"""
CSRF_ENABLED = True
SECRET_KEY = os.environ["SECRET_KEY"]
SQLALCHEMY_DATABASE_URI = os.environ["SQLALCHEMY_DATABASE_URI"]
class Test(object):
""" This class configures the parameters to be used in a test enviroment"""
CSRF_ENABLED = True
SQLALCHEMY_TEST_DATABASE_URI = os.environ["SQLALCHEMY_TEST_DATABASE_URI"]
SECRET_KEY = os.environ["SECRET_KEY"]
|
Fix SQLACHEMY_DATABASE_URI for the tests database
|
[Bug] Fix SQLACHEMY_DATABASE_URI for the tests database
|
Python
|
mit
|
CharlesJonah/bucket_list_api,CharlesJonah/bucket_list_api
|
[Bug] Fix SQLACHEMY_DATABASE_URI for the tests database
|
import os
class Config(object):
""" This class configures the parameters to be used in a production enviroment"""
CSRF_ENABLED = True
SECRET_KEY = os.environ["SECRET_KEY"]
SQLALCHEMY_DATABASE_URI = os.environ["SQLALCHEMY_DATABASE_URI"]
class Test(object):
""" This class configures the parameters to be used in a test enviroment"""
CSRF_ENABLED = True
SQLALCHEMY_TEST_DATABASE_URI = os.environ["SQLALCHEMY_TEST_DATABASE_URI"]
SECRET_KEY = os.environ["SECRET_KEY"]
|
<commit_before><commit_msg>[Bug] Fix SQLACHEMY_DATABASE_URI for the tests database<commit_after>
|
import os
class Config(object):
""" This class configures the parameters to be used in a production enviroment"""
CSRF_ENABLED = True
SECRET_KEY = os.environ["SECRET_KEY"]
SQLALCHEMY_DATABASE_URI = os.environ["SQLALCHEMY_DATABASE_URI"]
class Test(object):
""" This class configures the parameters to be used in a test enviroment"""
CSRF_ENABLED = True
SQLALCHEMY_TEST_DATABASE_URI = os.environ["SQLALCHEMY_TEST_DATABASE_URI"]
SECRET_KEY = os.environ["SECRET_KEY"]
|
[Bug] Fix SQLACHEMY_DATABASE_URI for the tests databaseimport os
class Config(object):
""" This class configures the parameters to be used in a production enviroment"""
CSRF_ENABLED = True
SECRET_KEY = os.environ["SECRET_KEY"]
SQLALCHEMY_DATABASE_URI = os.environ["SQLALCHEMY_DATABASE_URI"]
class Test(object):
""" This class configures the parameters to be used in a test enviroment"""
CSRF_ENABLED = True
SQLALCHEMY_TEST_DATABASE_URI = os.environ["SQLALCHEMY_TEST_DATABASE_URI"]
SECRET_KEY = os.environ["SECRET_KEY"]
|
<commit_before><commit_msg>[Bug] Fix SQLACHEMY_DATABASE_URI for the tests database<commit_after>import os
class Config(object):
""" This class configures the parameters to be used in a production enviroment"""
CSRF_ENABLED = True
SECRET_KEY = os.environ["SECRET_KEY"]
SQLALCHEMY_DATABASE_URI = os.environ["SQLALCHEMY_DATABASE_URI"]
class Test(object):
""" This class configures the parameters to be used in a test enviroment"""
CSRF_ENABLED = True
SQLALCHEMY_TEST_DATABASE_URI = os.environ["SQLALCHEMY_TEST_DATABASE_URI"]
SECRET_KEY = os.environ["SECRET_KEY"]
|
|
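Editor's note: the Test class in the record stores the test database URI under the key SQLALCHEMY_TEST_DATABASE_URI, but Flask-SQLAlchemy only reads SQLALCHEMY_DATABASE_URI. A hedged sketch of how such a config class is typically consumed follows; the create_app() helper and the remapping of the test URI onto the standard key are assumptions for illustration, while the environment variable names are taken from the commit.

# Hedged sketch, not the project's actual wiring.
import os
from flask import Flask

class Test(object):
    CSRF_ENABLED = True
    TESTING = True
    SECRET_KEY = os.environ["SECRET_KEY"]
    # Point the key Flask-SQLAlchemy actually reads at the test database.
    SQLALCHEMY_DATABASE_URI = os.environ["SQLALCHEMY_TEST_DATABASE_URI"]

def create_app(config_object):
    app = Flask(__name__)
    app.config.from_object(config_object)
    return app

# app = create_app(Test)   # usage while running the test suite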
1984bf841dd45b6f49d9981aaa61b10205db2dd4
|
scripts/objectsize_recovery.py
|
scripts/objectsize_recovery.py
|
#!/usr/bin/env python
# Copyright (c) 2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Generates data for a recovery performance graph.
Keeps partition size constant and scales the number of objects.
"""
from __future__ import division
from common import *
import recovery
import subprocess
dat = open('%s/recovery/objectsize_scale.data' % top_path, 'w')
for objectSize in (8192, 4096, 2048, 1024, 512, 256, 128):
args = {}
args['numBackups'] = 6
args['numPartitions'] = 1
args['objectSize'] = objectSize
args['disk'] = True
numObjectsPerMb = 2**20 / (objectSize + 40)
print('Running with %d objects of size %d' % (numObjectsPerMb * 400, objectSize))
while True:
try:
r= recovery.recover(
oldMasterArgs='-m 3000',
numObjects=int(numObjectsPerMb * 400),
**args)
except subprocess.CalledProcessError, e:
print e
else:
break
print 'Result', r
dat.write('%d\t%d\n' % (objectSize, r['ns']))
dat.flush()
|
Add script to generate recovery time versus object size data
|
Add script to generate recovery time versus object size data
|
Python
|
isc
|
mrdiegoa/ramcloud,DavidLi2010/ramcloud,Frank-Wu/RamCloud,Frank-Wu/RamCloud,rstutsman/RAMCloud,matrix207/RAMCloud,jblomer/ramcloud,anirajk/RAMCloud,jcarreira/ramcloud,QingkaiLu/RAMCloud,Frank-Wu/RamCloud,y-higuchi/ramcloud,behnamm/cs244b_project,matrix207/RAMCloud,y-higuchi/ramcloud,jcarreira/ramcloud,IMCG/RamCloud,jcarreira/ramcloud,DavidLi2010/ramcloud,taschik/ramcloud,Frank-Wu/RamCloud,behnamm/cs244b_project,QingkaiLu/RAMCloud,matrix207/RAMCloud,utah-scs/RAMCloud,utah-scs/RAMCloud,SMatsushi/RAMCloud,jcarreira/ramcloud,anirajk/RAMCloud,behnamm/cs244b_project,jcarreira/ramcloud,taschik/ramcloud,jblomer/ramcloud,jblomer/ramcloud,IMCG/RamCloud,utah-scs/RAMCloud,taschik/ramcloud,alexandermerritt/ramcloud,rstutsman/RAMCloud,y-higuchi/ramcloud,Frank-Wu/RamCloud,alexandermerritt/ramcloud,rstutsman/RAMCloud,taschik/ramcloud-load-manager,mrdiegoa/ramcloud,jblomer/ramcloud,mrdiegoa/ramcloud,IMCG/RamCloud,behnamm/cs244b_project,QingkaiLu/RAMCloud,behnamm/cs244b_project,utah-scs/RAMCloud,matrix207/RAMCloud,taschik/ramcloud-load-manager,utah-scs/RAMCloud,taschik/ramcloud-load-manager,SMatsushi/RAMCloud,QingkaiLu/RAMCloud,mrdiegoa/ramcloud,SMatsushi/RAMCloud,SMatsushi/RAMCloud,alexandermerritt/ramcloud,taschik/ramcloud-load-manager,taschik/ramcloud,DavidLi2010/ramcloud,SMatsushi/RAMCloud,utah-scs/RAMCloud,alexandermerritt/ramcloud,taschik/ramcloud,jblomer/ramcloud,SMatsushi/RAMCloud,rstutsman/RAMCloud,IMCG/RamCloud,y-higuchi/ramcloud,matrix207/RAMCloud,y-higuchi/ramcloud,IMCG/RamCloud,DavidLi2010/ramcloud,mrdiegoa/ramcloud,anirajk/RAMCloud,alexandermerritt/ramcloud,jblomer/ramcloud,rstutsman/RAMCloud,DavidLi2010/ramcloud,mrdiegoa/ramcloud,Frank-Wu/RamCloud,rstutsman/RAMCloud,anirajk/RAMCloud,behnamm/cs244b_project,jcarreira/ramcloud,anirajk/RAMCloud,QingkaiLu/RAMCloud,IMCG/RamCloud,QingkaiLu/RAMCloud,y-higuchi/ramcloud,matrix207/RAMCloud,taschik/ramcloud-load-manager,anirajk/RAMCloud,DavidLi2010/ramcloud,alexandermerritt/ramcloud
|
Add script to generate recovery time versus object size data
|
#!/usr/bin/env python
# Copyright (c) 2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Generates data for a recovery performance graph.
Keeps partition size constant and scales the number of objects.
"""
from __future__ import division
from common import *
import recovery
import subprocess
dat = open('%s/recovery/objectsize_scale.data' % top_path, 'w')
for objectSize in (8192, 4096, 2048, 1024, 512, 256, 128):
args = {}
args['numBackups'] = 6
args['numPartitions'] = 1
args['objectSize'] = objectSize
args['disk'] = True
numObjectsPerMb = 2**20 / (objectSize + 40)
print('Running with %d objects of size %d' % (numObjectsPerMb * 400, objectSize))
while True:
try:
r= recovery.recover(
oldMasterArgs='-m 3000',
numObjects=int(numObjectsPerMb * 400),
**args)
except subprocess.CalledProcessError, e:
print e
else:
break
print 'Result', r
dat.write('%d\t%d\n' % (objectSize, r['ns']))
dat.flush()
|
<commit_before><commit_msg>Add script to generate recovery time versus object size data<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Generates data for a recovery performance graph.
Keeps partition size constant and scales the number of objects.
"""
from __future__ import division
from common import *
import recovery
import subprocess
dat = open('%s/recovery/objectsize_scale.data' % top_path, 'w')
for objectSize in (8192, 4096, 2048, 1024, 512, 256, 128):
args = {}
args['numBackups'] = 6
args['numPartitions'] = 1
args['objectSize'] = objectSize
args['disk'] = True
numObjectsPerMb = 2**20 / (objectSize + 40)
print('Running with %d objects of size %d' % (numObjectsPerMb * 400, objectSize))
while True:
try:
r= recovery.recover(
oldMasterArgs='-m 3000',
numObjects=int(numObjectsPerMb * 400),
**args)
except subprocess.CalledProcessError, e:
print e
else:
break
print 'Result', r
dat.write('%d\t%d\n' % (objectSize, r['ns']))
dat.flush()
|
Add script to generate recovery time versus object size data#!/usr/bin/env python
# Copyright (c) 2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Generates data for a recovery performance graph.
Keeps partition size constant and scales the number of objects.
"""
from __future__ import division
from common import *
import recovery
import subprocess
dat = open('%s/recovery/objectsize_scale.data' % top_path, 'w')
for objectSize in (8192, 4096, 2048, 1024, 512, 256, 128):
args = {}
args['numBackups'] = 6
args['numPartitions'] = 1
args['objectSize'] = objectSize
args['disk'] = True
numObjectsPerMb = 2**20 / (objectSize + 40)
print('Running with %d objects of size %d' % (numObjectsPerMb * 400, objectSize))
while True:
try:
r= recovery.recover(
oldMasterArgs='-m 3000',
numObjects=int(numObjectsPerMb * 400),
**args)
except subprocess.CalledProcessError, e:
print e
else:
break
print 'Result', r
dat.write('%d\t%d\n' % (objectSize, r['ns']))
dat.flush()
|
<commit_before><commit_msg>Add script to generate recovery time versus object size data<commit_after>#!/usr/bin/env python
# Copyright (c) 2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Generates data for a recovery performance graph.
Keeps partition size constant and scales the number of objects.
"""
from __future__ import division
from common import *
import recovery
import subprocess
dat = open('%s/recovery/objectsize_scale.data' % top_path, 'w')
for objectSize in (8192, 4096, 2048, 1024, 512, 256, 128):
args = {}
args['numBackups'] = 6
args['numPartitions'] = 1
args['objectSize'] = objectSize
args['disk'] = True
numObjectsPerMb = 2**20 / (objectSize + 40)
print('Running with %d objects of size %d' % (numObjectsPerMb * 400, objectSize))
while True:
try:
r= recovery.recover(
oldMasterArgs='-m 3000',
numObjects=int(numObjectsPerMb * 400),
**args)
except subprocess.CalledProcessError, e:
print e
else:
break
print 'Result', r
dat.write('%d\t%d\n' % (objectSize, r['ns']))
dat.flush()
|
|
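Editor's note: the recovery script above uses Python 2 only syntax ("except subprocess.CalledProcessError, e:" and bare print statements). A hedged Python 3 rendering of just its retry loop follows; recovery.recover and its arguments are taken from the commit and assumed to behave the same way.

# Hedged sketch of the retry-until-success loop in Python 3 syntax.
import subprocess
import recovery   # project-local module from the commit

def recover_with_retry(num_objects, **args):
    while True:
        try:
            return recovery.recover(oldMasterArgs='-m 3000',
                                    numObjects=num_objects,
                                    **args)
        except subprocess.CalledProcessError as e:   # Py3 spelling of "except ..., e"
            print(e)                                 # log and retry, as in the original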
de179fafa95082b266ea0ca561b835058dc902de
|
bot/bot_optimized.py
|
bot/bot_optimized.py
|
import praw
import time
r = praw.Reddit('Comment parser example by u/_Daimon_')
multi_reddits = r.get_subreddit('gif+pics')
base = multi_reddits.get_comments(limit=1).next()
print "Base=", base.fullname
time.sleep(4)
while True:
mrc = multi_reddits.get_comments(limit=40, params={"before": base.fullname})
for com in mrc:
print com.fullname, com
if com.created_utc > base.created_utc:
base = com
time.sleep(1)
|
Add optimized bot. Reduce the number of queries
|
bot: Add optimized bot. Reduce the number of queries
Signed-off-by: Valentin Ilie <2677a908e9239f6354e2990b8c11cc5ac25eaf8a@gmail.com>
|
Python
|
unlicense
|
vilie/rp,vilie/rp
|
bot: Add optimized bot. Reduce the number of queries
Signed-off-by: Valentin Ilie <2677a908e9239f6354e2990b8c11cc5ac25eaf8a@gmail.com>
|
import praw
import time
r = praw.Reddit('Comment parser example by u/_Daimon_')
multi_reddits = r.get_subreddit('gif+pics')
base = multi_reddits.get_comments(limit=1).next()
print "Base=", base.fullname
time.sleep(4)
while True:
mrc = multi_reddits.get_comments(limit=40, params={"before": base.fullname})
for com in mrc:
print com.fullname, com
if com.created_utc > base.created_utc:
base = com
time.sleep(1)
|
<commit_before><commit_msg>bot: Add optimized bot. Reduce the number of queries
Signed-off-by: Valentin Ilie <2677a908e9239f6354e2990b8c11cc5ac25eaf8a@gmail.com><commit_after>
|
import praw
import time
r = praw.Reddit('Comment parser example by u/_Daimon_')
multi_reddits = r.get_subreddit('gif+pics')
base = multi_reddits.get_comments(limit=1).next()
print "Base=", base.fullname
time.sleep(4)
while True:
mrc = multi_reddits.get_comments(limit=40, params={"before": base.fullname})
for com in mrc:
print com.fullname, com
if com.created_utc > base.created_utc:
base = com
time.sleep(1)
|
bot: Add optimized bot. Reduce the number of queries
Signed-off-by: Valentin Ilie <2677a908e9239f6354e2990b8c11cc5ac25eaf8a@gmail.com>import praw
import time
r = praw.Reddit('Comment parser example by u/_Daimon_')
multi_reddits = r.get_subreddit('gif+pics')
base = multi_reddits.get_comments(limit=1).next()
print "Base=", base.fullname
time.sleep(4)
while True:
mrc = multi_reddits.get_comments(limit=40, params={"before": base.fullname})
for com in mrc:
print com.fullname, com
if com.created_utc > base.created_utc:
base = com
time.sleep(1)
|
<commit_before><commit_msg>bot: Add optimized bot. Reduce the number of queries
Signed-off-by: Valentin Ilie <2677a908e9239f6354e2990b8c11cc5ac25eaf8a@gmail.com><commit_after>import praw
import time
r = praw.Reddit('Comment parser example by u/_Daimon_')
multi_reddits = r.get_subreddit('gif+pics')
base = multi_reddits.get_comments(limit=1).next()
print "Base=", base.fullname
time.sleep(4)
while True:
mrc = multi_reddits.get_comments(limit=40, params={"before": base.fullname})
for com in mrc:
print com.fullname, com
if com.created_utc > base.created_utc:
base = com
time.sleep(1)
|
|
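Editor's note: the optimisation in this commit is to poll get_comments with params={"before": base.fullname} so only comments newer than the last one seen are fetched, then advance base by created_utc. The iterator.next() call is Python 2 only; a hedged Python 3 sketch of the same loop follows, keeping the get_subreddit/get_comments calls exactly as in the commit (newer praw releases expose a different API), with the subreddit names as placeholders.

# Hedged sketch of the same "fetch only newer comments" polling loop.
import time
import praw

r = praw.Reddit('Comment parser example by u/_Daimon_')
multi = r.get_subreddit('gif+pics')
base = next(multi.get_comments(limit=1))          # iterator.next() is Python 2 only

while True:
    newer = multi.get_comments(limit=40, params={"before": base.fullname})
    for com in newer:
        print(com.fullname, com)
        if com.created_utc > base.created_utc:
            base = com                            # remember the newest comment seen
    time.sleep(1)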
29536938fa6f780875b808e68dc2afbf8dabf615
|
tests/make_cal.py
|
tests/make_cal.py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import datetime
import os
import sys
os.environ['TZ'] = 'UTC'
def crprint(s):
'''
Print using DOS line endings (to match Arduino code)
:param s:
:return:
'''
sys.stdout.write(s)
sys.stdout.write('\r\n')
parser = argparse.ArgumentParser(description='Make calendar')
parser.add_argument('epoch_year',
type=int,
default=1970,
help='Epoch year')
# parser.add_argument('end_year',
# type=int,
# default=2036,
# help='End year (exclusive)')
args = parser.parse_args()
# Map print to a function which always uses '\r\n' for newline, matching Arduino code.
print = crprint
unix_seconds_of_epoch = int(datetime.datetime(args.epoch_year, 1, 1).strftime('%s'))
print('# START')
print('# EPOCH: %d' % args.epoch_year)
for year in range(args.epoch_year, args.epoch_year + 68):
for month in range(1, 13):
d = datetime.datetime(year, month, 1)
yday = int(d.strftime('%j')) # Strip out leading zeros for easy comparison with Arduino output
# Compute seconds since epoch
unix_seconds = int(d.strftime('%s'))
alt_epoch_seconds = unix_seconds - unix_seconds_of_epoch
print(d.strftime('%Y-%m-%dT%H:%M:%S ' + str(alt_epoch_seconds) + ' ' + str(yday) + ' %w %A'))
print('# END')
|
Print a basic calendar for the start of each month from the epoch
|
Print a basic calendar for the start of each month from the epoch
|
Python
|
lgpl-2.1
|
stevemarple/RTCx,stevemarple/RTCx
|
Print a basic calendar for the start of each month from the epoch
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import datetime
import os
import sys
os.environ['TZ'] = 'UTC'
def crprint(s):
'''
Print using DOS line endings (to match Arduino code)
:param s:
:return:
'''
sys.stdout.write(s)
sys.stdout.write('\r\n')
parser = argparse.ArgumentParser(description='Make calendar')
parser.add_argument('epoch_year',
type=int,
default=1970,
help='Epoch year')
# parser.add_argument('end_year',
# type=int,
# default=2036,
# help='End year (exclusive)')
args = parser.parse_args()
# Map print to a function which always uses '\r\n' for newline, matching Arduino code.
print = crprint
unix_seconds_of_epoch = int(datetime.datetime(args.epoch_year, 1, 1).strftime('%s'))
print('# START')
print('# EPOCH: %d' % args.epoch_year)
for year in range(args.epoch_year, args.epoch_year + 68):
for month in range(1, 13):
d = datetime.datetime(year, month, 1)
yday = int(d.strftime('%j')) # Strip out leading zeros for easy comparison with Arduino output
# Compute seconds since epoch
unix_seconds = int(d.strftime('%s'))
alt_epoch_seconds = unix_seconds - unix_seconds_of_epoch
print(d.strftime('%Y-%m-%dT%H:%M:%S ' + str(alt_epoch_seconds) + ' ' + str(yday) + ' %w %A'))
print('# END')
|
<commit_before><commit_msg>Print a basic calendar for the start of each month from the epoch<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import datetime
import os
import sys
os.environ['TZ'] = 'UTC'
def crprint(s):
'''
Print using DOS line endings (to match Arduino code)
:param s:
:return:
'''
sys.stdout.write(s)
sys.stdout.write('\r\n')
parser = argparse.ArgumentParser(description='Make calendar')
parser.add_argument('epoch_year',
type=int,
default=1970,
help='Epoch year')
# parser.add_argument('end_year',
# type=int,
# default=2036,
# help='End year (exclusive)')
args = parser.parse_args()
# Map print to a function which always uses '\r\n' for newline, matching Arduino code.
print = crprint
unix_seconds_of_epoch = int(datetime.datetime(args.epoch_year, 1, 1).strftime('%s'))
print('# START')
print('# EPOCH: %d' % args.epoch_year)
for year in range(args.epoch_year, args.epoch_year + 68):
for month in range(1, 13):
d = datetime.datetime(year, month, 1)
yday = int(d.strftime('%j')) # Strip out leading zeros for easy comparison with Arduino output
# Compute seconds since epoch
unix_seconds = int(d.strftime('%s'))
alt_epoch_seconds = unix_seconds - unix_seconds_of_epoch
print(d.strftime('%Y-%m-%dT%H:%M:%S ' + str(alt_epoch_seconds) + ' ' + str(yday) + ' %w %A'))
print('# END')
|
Print a basic calendar for the start of each month from the epoch#!/usr/bin/env python
from __future__ import print_function
import argparse
import datetime
import os
import sys
os.environ['TZ'] = 'UTC'
def crprint(s):
'''
Print using DOS line endings (to match Arduino code)
:param s:
:return:
'''
sys.stdout.write(s)
sys.stdout.write('\r\n')
parser = argparse.ArgumentParser(description='Make calendar')
parser.add_argument('epoch_year',
type=int,
default=1970,
help='Epoch year')
# parser.add_argument('end_year',
# type=int,
# default=2036,
# help='End year (exclusive)')
args = parser.parse_args()
# Map print to a function which always uses '\r\n' for newline, matching Arduino code.
print = crprint
unix_seconds_of_epoch = int(datetime.datetime(args.epoch_year, 1, 1).strftime('%s'))
print('# START')
print('# EPOCH: %d' % args.epoch_year)
for year in range(args.epoch_year, args.epoch_year + 68):
for month in range(1, 13):
d = datetime.datetime(year, month, 1)
yday = int(d.strftime('%j')) # Strip out leading zeros for easy comparison with Arduino output
# Compute seconds since epoch
unix_seconds = int(d.strftime('%s'))
alt_epoch_seconds = unix_seconds - unix_seconds_of_epoch
print(d.strftime('%Y-%m-%dT%H:%M:%S ' + str(alt_epoch_seconds) + ' ' + str(yday) + ' %w %A'))
print('# END')
|
<commit_before><commit_msg>Print a basic calendar for the start of each month from the epoch<commit_after>#!/usr/bin/env python
from __future__ import print_function
import argparse
import datetime
import os
import sys
os.environ['TZ'] = 'UTC'
def crprint(s):
'''
Print using DOS line endings (to match Arduino code)
:param s:
:return:
'''
sys.stdout.write(s)
sys.stdout.write('\r\n')
parser = argparse.ArgumentParser(description='Make calendar')
parser.add_argument('epoch_year',
type=int,
default=1970,
help='Epoch year')
# parser.add_argument('end_year',
# type=int,
# default=2036,
# help='End year (exclusive)')
args = parser.parse_args()
# Map print to a function which always uses '\r\n' for newline, matching Arduino code.
print = crprint
unix_seconds_of_epoch = int(datetime.datetime(args.epoch_year, 1, 1).strftime('%s'))
print('# START')
print('# EPOCH: %d' % args.epoch_year)
for year in range(args.epoch_year, args.epoch_year + 68):
for month in range(1, 13):
d = datetime.datetime(year, month, 1)
yday = int(d.strftime('%j')) # Strip out leading zeros for easy comparison with Arduino output
# Compute seconds since epoch
unix_seconds = int(d.strftime('%s'))
alt_epoch_seconds = unix_seconds - unix_seconds_of_epoch
print(d.strftime('%Y-%m-%dT%H:%M:%S ' + str(alt_epoch_seconds) + ' ' + str(yday) + ' %w %A'))
print('# END')
|
|
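Editor's note: the calendar script relies on strftime('%s') to get seconds since the Unix epoch, which is a platform-specific C library extension rather than a documented Python format code, so it fails on some systems. A hedged sketch of the same epoch arithmetic using calendar.timegm(), which interprets the time tuple as UTC to match the script's intent, follows; the sample dates are placeholders.

# Hedged sketch: seconds since a custom epoch without strftime('%s').
import calendar
import datetime

epoch_year = 1970
epoch_seconds = calendar.timegm(datetime.datetime(epoch_year, 1, 1).timetuple())

d = datetime.datetime(epoch_year, 3, 1)
alt_epoch_seconds = calendar.timegm(d.timetuple()) - epoch_seconds
print(d.strftime('%Y-%m-%dT%H:%M:%S'), alt_epoch_seconds, int(d.strftime('%j')))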
9d269683fbfaaf2afc9b0ce171f8a11242144eb2
|
tests/test_vat.py
|
tests/test_vat.py
|
import unittest
from decimal import Decimal as D
from oscar_vat_moss import vat
class AddressTest(unittest.TestCase):
ADDRESSES = (
# Submission dictionary # Expected rate
({'line4': 'Vienna',
'country': 'AT',
'postcode': 1010}, D('0.20')),
({'line4': 'Berlin',
'country': 'DE',
'postcode': 10001}, D('0.19')),
({'line4': 'Jungholz',
'country': 'AT',
'postcode': 6691}, D('0.19')),
({'line4': 'Galway',
'country': 'IE',
}, D('0.23')),
)
def test_vat_lookup_rate_by_address(self):
for submission, expected_rate in self.ADDRESSES:
result_rate = vat.lookup_vat(submission)
self.assertEqual(result_rate,
expected_rate,
msg="Unexpected VAT rate returned for %s: %s" % (submission, result_rate))
class PhoneNumberTest(unittest.TestCase):
PHONE_NUMBERS = (
# Submission dictionary # Expected rate
({'phone_number': '+43 1 234 5678',
}, D('0.20')),
({'phone_number': '+49 170 12345',
}, D('0.19')),
)
def test_vat_lookup_rate_by_phone(self):
for submission, expected_rate in self.PHONE_NUMBERS:
result_rate = vat.lookup_vat(submission)
self.assertEqual(result_rate,
expected_rate,
msg="Unexpected VAT rate returned for %s: %s" % (submission, result_rate))
|
Add unit tests for vat.py
|
Add unit tests for vat.py
|
Python
|
bsd-3-clause
|
fghaas/django-oscar-vat_moss,arbrandes/django-oscar-vat_moss,hastexo/django-oscar-vat_moss,fghaas/django-oscar-vat_moss,arbrandes/django-oscar-vat_moss,hastexo/django-oscar-vat_moss
|
Add unit tests for vat.py
|
import unittest
from decimal import Decimal as D
from oscar_vat_moss import vat
class AddressTest(unittest.TestCase):
ADDRESSES = (
# Submission dictionary # Expected rate
({'line4': 'Vienna',
'country': 'AT',
'postcode': 1010}, D('0.20')),
({'line4': 'Berlin',
'country': 'DE',
'postcode': 10001}, D('0.19')),
({'line4': 'Jungholz',
'country': 'AT',
'postcode': 6691}, D('0.19')),
({'line4': 'Galway',
'country': 'IE',
}, D('0.23')),
)
def test_vat_lookup_rate_by_address(self):
for submission, expected_rate in self.ADDRESSES:
result_rate = vat.lookup_vat(submission)
self.assertEqual(result_rate,
expected_rate,
msg="Unexpected VAT rate returned for %s: %s" % (submission, result_rate))
class PhoneNumberTest(unittest.TestCase):
PHONE_NUMBERS = (
# Submission dictionary # Expected rate
({'phone_number': '+43 1 234 5678',
}, D('0.20')),
({'phone_number': '+49 170 12345',
}, D('0.19')),
)
def test_vat_lookup_rate_by_phone(self):
for submission, expected_rate in self.PHONE_NUMBERS:
result_rate = vat.lookup_vat(submission)
self.assertEqual(result_rate,
expected_rate,
msg="Unexpected VAT rate returned for %s: %s" % (submission, result_rate))
|
<commit_before><commit_msg>Add unit tests for vat.py<commit_after>
|
import unittest
from decimal import Decimal as D
from oscar_vat_moss import vat
class AddressTest(unittest.TestCase):
ADDRESSES = (
# Submission dictionary # Expected rate
({'line4': 'Vienna',
'country': 'AT',
'postcode': 1010}, D('0.20')),
({'line4': 'Berlin',
'country': 'DE',
'postcode': 10001}, D('0.19')),
({'line4': 'Jungholz',
'country': 'AT',
'postcode': 6691}, D('0.19')),
({'line4': 'Galway',
'country': 'IE',
}, D('0.23')),
)
def test_vat_lookup_rate_by_address(self):
for submission, expected_rate in self.ADDRESSES:
result_rate = vat.lookup_vat(submission)
self.assertEqual(result_rate,
expected_rate,
msg="Unexpected VAT rate returned for %s: %s" % (submission, result_rate))
class PhoneNumberTest(unittest.TestCase):
PHONE_NUMBERS = (
# Submission dictionary # Expected rate
({'phone_number': '+43 1 234 5678',
}, D('0.20')),
({'phone_number': '+49 170 12345',
}, D('0.19')),
)
def test_vat_lookup_rate_by_phone(self):
for submission, expected_rate in self.PHONE_NUMBERS:
result_rate = vat.lookup_vat(submission)
self.assertEqual(result_rate,
expected_rate,
msg="Unexpected VAT rate returned for %s: %s" % (submission, result_rate))
|
Add unit tests for vat.pyimport unittest
from decimal import Decimal as D
from oscar_vat_moss import vat
class AddressTest(unittest.TestCase):
ADDRESSES = (
# Submission dictionary # Expected rate
({'line4': 'Vienna',
'country': 'AT',
'postcode': 1010}, D('0.20')),
({'line4': 'Berlin',
'country': 'DE',
'postcode': 10001}, D('0.19')),
({'line4': 'Jungholz',
'country': 'AT',
'postcode': 6691}, D('0.19')),
({'line4': 'Galway',
'country': 'IE',
}, D('0.23')),
)
def test_vat_lookup_rate_by_address(self):
for submission, expected_rate in self.ADDRESSES:
result_rate = vat.lookup_vat(submission)
self.assertEqual(result_rate,
expected_rate,
msg="Unexpected VAT rate returned for %s: %s" % (submission, result_rate))
class PhoneNumberTest(unittest.TestCase):
PHONE_NUMBERS = (
# Submission dictionary # Expected rate
({'phone_number': '+43 1 234 5678',
}, D('0.20')),
({'phone_number': '+49 170 12345',
}, D('0.19')),
)
def test_vat_lookup_rate_by_phone(self):
for submission, expected_rate in self.PHONE_NUMBERS:
result_rate = vat.lookup_vat(submission)
self.assertEqual(result_rate,
expected_rate,
msg="Unexpected VAT rate returned for %s: %s" % (submission, result_rate))
|
<commit_before><commit_msg>Add unit tests for vat.py<commit_after>import unittest
from decimal import Decimal as D
from oscar_vat_moss import vat
class AddressTest(unittest.TestCase):
ADDRESSES = (
# Submission dictionary # Expected rate
({'line4': 'Vienna',
'country': 'AT',
'postcode': 1010}, D('0.20')),
({'line4': 'Berlin',
'country': 'DE',
'postcode': 10001}, D('0.19')),
({'line4': 'Jungholz',
'country': 'AT',
'postcode': 6691}, D('0.19')),
({'line4': 'Galway',
'country': 'IE',
}, D('0.23')),
)
def test_vat_lookup_rate_by_address(self):
for submission, expected_rate in self.ADDRESSES:
result_rate = vat.lookup_vat(submission)
self.assertEqual(result_rate,
expected_rate,
msg="Unexpected VAT rate returned for %s: %s" % (submission, result_rate))
class PhoneNumberTest(unittest.TestCase):
PHONE_NUMBERS = (
# Submission dictionary # Expected rate
({'phone_number': '+43 1 234 5678',
}, D('0.20')),
({'phone_number': '+49 170 12345',
}, D('0.19')),
)
def test_vat_lookup_rate_by_phone(self):
for submission, expected_rate in self.PHONE_NUMBERS:
result_rate = vat.lookup_vat(submission)
self.assertEqual(result_rate,
expected_rate,
msg="Unexpected VAT rate returned for %s: %s" % (submission, result_rate))
|
|
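Editor's note: both test methods in the record loop over a table of cases inside a single test, so the first failing submission hides the remaining ones. A hedged sketch of the same table-driven check wrapped in unittest's subTest() follows; vat.lookup_vat and the submission dictionaries are taken from the tests above.

# Hedged sketch: one sub-test per submission, so failures are reported per case.
import unittest
from decimal import Decimal as D
from oscar_vat_moss import vat

class AddressSubTestSketch(unittest.TestCase):
    ADDRESSES = (
        ({'line4': 'Vienna', 'country': 'AT', 'postcode': 1010}, D('0.20')),
        ({'line4': 'Berlin', 'country': 'DE', 'postcode': 10001}, D('0.19')),
    )

    def test_vat_lookup_rate_by_address(self):
        for submission, expected_rate in self.ADDRESSES:
            with self.subTest(submission=submission):
                self.assertEqual(vat.lookup_vat(submission), expected_rate)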
207642657e353003507243bdf863b1e27b7a04cd
|
benchmark/paddle/image/plotlog.py
|
benchmark/paddle/image/plotlog.py
|
#coding=utf-8
import sys
import argparse
import matplotlib.pyplot as plt
def parse_args():
parser = argparse.ArgumentParser('Parse Log')
parser.add_argument(
'--file_path', '-f', type=str, help='the path of the log file')
parser.add_argument(
'--sample_rate',
'-s',
type=float,
default=1.0,
help='the rate to take samples from log')
parser.add_argument(
'--log_period', '-p', type=int, default=1, help='the period of log')
args = parser.parse_args()
return args
def parse_file(file_name):
loss = []
error = []
with open(file_name) as f:
for i, line in enumerate(f):
line = line.strip()
if not line.startswith('pass'):
continue
line_split = line.split(' ')
if len(line_split) != 5:
continue
loss_str = line_split[2][:-1]
cur_loss = float(loss_str.split('=')[-1])
loss.append(cur_loss)
err_str = line_split[3][:-1]
cur_err = float(err_str.split('=')[-1])
error.append(cur_err)
accuracy = [1.0 - err for err in error]
return loss, accuracy
def sample(metric, sample_rate):
interval = int(1.0 / sample_rate)
if interval > len(metric):
return metric[:1]
num = len(metric) / interval
idx = [interval * i for i in range(num)]
metric_sample = [metric[id] for id in idx]
return metric_sample
def plot_metric(metric, batch_id, graph_title):
plt.figure()
plt.title(graph_title)
plt.plot(batch_id, metric)
plt.xlabel('batch')
plt.ylabel(graph_title)
plt.savefig(graph_title + '.jpg')
plt.close()
def main():
args = parse_args()
assert args.sample_rate > 0. and args.sample_rate <= 1.0, "The sample rate should in the range (0, 1]."
loss, accuracy = parse_file(args.file_path)
batch = [args.log_period * i for i in range(len(loss))]
batch_sample = sample(batch, args.sample_rate)
loss_sample = sample(loss, args.sample_rate)
accuracy_sample = sample(accuracy, args.sample_rate)
plot_metric(loss_sample, batch_sample, 'loss')
plot_metric(accuracy_sample, batch_sample, 'accuracy')
if __name__ == '__main__':
main()
|
Add script to plot learning curve
|
Add script to plot learning curve
|
Python
|
apache-2.0
|
reyoung/Paddle,QiJune/Paddle,lcy-seso/Paddle,Canpio/Paddle,jacquesqiao/Paddle,pkuyym/Paddle,putcn/Paddle,Canpio/Paddle,Canpio/Paddle,baidu/Paddle,luotao1/Paddle,tensor-tang/Paddle,reyoung/Paddle,pkuyym/Paddle,lcy-seso/Paddle,jacquesqiao/Paddle,PaddlePaddle/Paddle,reyoung/Paddle,Canpio/Paddle,lcy-seso/Paddle,lcy-seso/Paddle,PaddlePaddle/Paddle,putcn/Paddle,pkuyym/Paddle,PaddlePaddle/Paddle,luotao1/Paddle,luotao1/Paddle,Canpio/Paddle,PaddlePaddle/Paddle,QiJune/Paddle,QiJune/Paddle,chengduoZH/Paddle,reyoung/Paddle,pkuyym/Paddle,QiJune/Paddle,luotao1/Paddle,tensor-tang/Paddle,luotao1/Paddle,jacquesqiao/Paddle,tensor-tang/Paddle,chengduoZH/Paddle,Canpio/Paddle,reyoung/Paddle,jacquesqiao/Paddle,chengduoZH/Paddle,luotao1/Paddle,baidu/Paddle,pkuyym/Paddle,Canpio/Paddle,tensor-tang/Paddle,putcn/Paddle,QiJune/Paddle,reyoung/Paddle,PaddlePaddle/Paddle,luotao1/Paddle,chengduoZH/Paddle,lcy-seso/Paddle,tensor-tang/Paddle,jacquesqiao/Paddle,baidu/Paddle,jacquesqiao/Paddle,chengduoZH/Paddle,Canpio/Paddle,baidu/Paddle,baidu/Paddle,PaddlePaddle/Paddle,putcn/Paddle,putcn/Paddle,QiJune/Paddle,lcy-seso/Paddle,putcn/Paddle,PaddlePaddle/Paddle,pkuyym/Paddle
|
Add script to plot learning curve
|
#coding=utf-8
import sys
import argparse
import matplotlib.pyplot as plt
def parse_args():
parser = argparse.ArgumentParser('Parse Log')
parser.add_argument(
'--file_path', '-f', type=str, help='the path of the log file')
parser.add_argument(
'--sample_rate',
'-s',
type=float,
default=1.0,
help='the rate to take samples from log')
parser.add_argument(
'--log_period', '-p', type=int, default=1, help='the period of log')
args = parser.parse_args()
return args
def parse_file(file_name):
loss = []
error = []
with open(file_name) as f:
for i, line in enumerate(f):
line = line.strip()
if not line.startswith('pass'):
continue
line_split = line.split(' ')
if len(line_split) != 5:
continue
loss_str = line_split[2][:-1]
cur_loss = float(loss_str.split('=')[-1])
loss.append(cur_loss)
err_str = line_split[3][:-1]
cur_err = float(err_str.split('=')[-1])
error.append(cur_err)
accuracy = [1.0 - err for err in error]
return loss, accuracy
def sample(metric, sample_rate):
interval = int(1.0 / sample_rate)
if interval > len(metric):
return metric[:1]
num = len(metric) / interval
idx = [interval * i for i in range(num)]
metric_sample = [metric[id] for id in idx]
return metric_sample
def plot_metric(metric, batch_id, graph_title):
plt.figure()
plt.title(graph_title)
plt.plot(batch_id, metric)
plt.xlabel('batch')
plt.ylabel(graph_title)
plt.savefig(graph_title + '.jpg')
plt.close()
def main():
args = parse_args()
assert args.sample_rate > 0. and args.sample_rate <= 1.0, "The sample rate should in the range (0, 1]."
loss, accuracy = parse_file(args.file_path)
batch = [args.log_period * i for i in range(len(loss))]
batch_sample = sample(batch, args.sample_rate)
loss_sample = sample(loss, args.sample_rate)
accuracy_sample = sample(accuracy, args.sample_rate)
plot_metric(loss_sample, batch_sample, 'loss')
plot_metric(accuracy_sample, batch_sample, 'accuracy')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to plot learning curve<commit_after>
|
#coding=utf-8
import sys
import argparse
import matplotlib.pyplot as plt
def parse_args():
parser = argparse.ArgumentParser('Parse Log')
parser.add_argument(
'--file_path', '-f', type=str, help='the path of the log file')
parser.add_argument(
'--sample_rate',
'-s',
type=float,
default=1.0,
help='the rate to take samples from log')
parser.add_argument(
'--log_period', '-p', type=int, default=1, help='the period of log')
args = parser.parse_args()
return args
def parse_file(file_name):
loss = []
error = []
with open(file_name) as f:
for i, line in enumerate(f):
line = line.strip()
if not line.startswith('pass'):
continue
line_split = line.split(' ')
if len(line_split) != 5:
continue
loss_str = line_split[2][:-1]
cur_loss = float(loss_str.split('=')[-1])
loss.append(cur_loss)
err_str = line_split[3][:-1]
cur_err = float(err_str.split('=')[-1])
error.append(cur_err)
accuracy = [1.0 - err for err in error]
return loss, accuracy
def sample(metric, sample_rate):
interval = int(1.0 / sample_rate)
if interval > len(metric):
return metric[:1]
num = len(metric) / interval
idx = [interval * i for i in range(num)]
metric_sample = [metric[id] for id in idx]
return metric_sample
def plot_metric(metric, batch_id, graph_title):
plt.figure()
plt.title(graph_title)
plt.plot(batch_id, metric)
plt.xlabel('batch')
plt.ylabel(graph_title)
plt.savefig(graph_title + '.jpg')
plt.close()
def main():
args = parse_args()
assert args.sample_rate > 0. and args.sample_rate <= 1.0, "The sample rate should in the range (0, 1]."
loss, accuracy = parse_file(args.file_path)
batch = [args.log_period * i for i in range(len(loss))]
batch_sample = sample(batch, args.sample_rate)
loss_sample = sample(loss, args.sample_rate)
accuracy_sample = sample(accuracy, args.sample_rate)
plot_metric(loss_sample, batch_sample, 'loss')
plot_metric(accuracy_sample, batch_sample, 'accuracy')
if __name__ == '__main__':
main()
|
Add script to plot learning curve#coding=utf-8
import sys
import argparse
import matplotlib.pyplot as plt
def parse_args():
parser = argparse.ArgumentParser('Parse Log')
parser.add_argument(
'--file_path', '-f', type=str, help='the path of the log file')
parser.add_argument(
'--sample_rate',
'-s',
type=float,
default=1.0,
help='the rate to take samples from log')
parser.add_argument(
'--log_period', '-p', type=int, default=1, help='the period of log')
args = parser.parse_args()
return args
def parse_file(file_name):
loss = []
error = []
with open(file_name) as f:
for i, line in enumerate(f):
line = line.strip()
if not line.startswith('pass'):
continue
line_split = line.split(' ')
if len(line_split) != 5:
continue
loss_str = line_split[2][:-1]
cur_loss = float(loss_str.split('=')[-1])
loss.append(cur_loss)
err_str = line_split[3][:-1]
cur_err = float(err_str.split('=')[-1])
error.append(cur_err)
accuracy = [1.0 - err for err in error]
return loss, accuracy
def sample(metric, sample_rate):
interval = int(1.0 / sample_rate)
if interval > len(metric):
return metric[:1]
num = len(metric) / interval
idx = [interval * i for i in range(num)]
metric_sample = [metric[id] for id in idx]
return metric_sample
def plot_metric(metric, batch_id, graph_title):
plt.figure()
plt.title(graph_title)
plt.plot(batch_id, metric)
plt.xlabel('batch')
plt.ylabel(graph_title)
plt.savefig(graph_title + '.jpg')
plt.close()
def main():
args = parse_args()
assert args.sample_rate > 0. and args.sample_rate <= 1.0, "The sample rate should in the range (0, 1]."
loss, accuracy = parse_file(args.file_path)
batch = [args.log_period * i for i in range(len(loss))]
batch_sample = sample(batch, args.sample_rate)
loss_sample = sample(loss, args.sample_rate)
accuracy_sample = sample(accuracy, args.sample_rate)
plot_metric(loss_sample, batch_sample, 'loss')
plot_metric(accuracy_sample, batch_sample, 'accuracy')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to plot learning curve<commit_after>#coding=utf-8
import sys
import argparse
import matplotlib.pyplot as plt
def parse_args():
parser = argparse.ArgumentParser('Parse Log')
parser.add_argument(
'--file_path', '-f', type=str, help='the path of the log file')
parser.add_argument(
'--sample_rate',
'-s',
type=float,
default=1.0,
help='the rate to take samples from log')
parser.add_argument(
'--log_period', '-p', type=int, default=1, help='the period of log')
args = parser.parse_args()
return args
def parse_file(file_name):
loss = []
error = []
with open(file_name) as f:
for i, line in enumerate(f):
line = line.strip()
if not line.startswith('pass'):
continue
line_split = line.split(' ')
if len(line_split) != 5:
continue
loss_str = line_split[2][:-1]
cur_loss = float(loss_str.split('=')[-1])
loss.append(cur_loss)
err_str = line_split[3][:-1]
cur_err = float(err_str.split('=')[-1])
error.append(cur_err)
accuracy = [1.0 - err for err in error]
return loss, accuracy
def sample(metric, sample_rate):
interval = int(1.0 / sample_rate)
if interval > len(metric):
return metric[:1]
num = len(metric) / interval
idx = [interval * i for i in range(num)]
metric_sample = [metric[id] for id in idx]
return metric_sample
def plot_metric(metric, batch_id, graph_title):
plt.figure()
plt.title(graph_title)
plt.plot(batch_id, metric)
plt.xlabel('batch')
plt.ylabel(graph_title)
plt.savefig(graph_title + '.jpg')
plt.close()
def main():
args = parse_args()
assert args.sample_rate > 0. and args.sample_rate <= 1.0, "The sample rate should in the range (0, 1]."
loss, accuracy = parse_file(args.file_path)
batch = [args.log_period * i for i in range(len(loss))]
batch_sample = sample(batch, args.sample_rate)
loss_sample = sample(loss, args.sample_rate)
accuracy_sample = sample(accuracy, args.sample_rate)
plot_metric(loss_sample, batch_sample, 'loss')
plot_metric(accuracy_sample, batch_sample, 'accuracy')
if __name__ == '__main__':
main()
|
|
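Editor's note: the sample() helper in the plotting script computes num = len(metric) / interval and passes it to range(); under Python 3 that division yields a float and range() raises TypeError. A hedged sketch of the helper adjusted with integer division follows, which preserves the original Python 2 behaviour.

# Hedged sketch of sample() that runs under both Python 2 and Python 3.
def sample(metric, sample_rate):
    interval = int(1.0 / sample_rate)
    if interval > len(metric):
        return metric[:1]
    num = len(metric) // interval            # integer division keeps range() happy on Py3
    return [metric[i * interval] for i in range(num)]

# Example: a sample rate of 0.5 keeps every other point
assert sample([1, 2, 3, 4, 5, 6], 0.5) == [1, 3, 5]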
982fc34e7b4b78f49ef8b8c5c1842f410f394dc9
|
steps/app-switch-demo.py
|
steps/app-switch-demo.py
|
from behave import given, when, then
from appium import webdriver
import os
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support import expected_conditions as EC
import logging
logging.basicConfig(level=logging.DEBUG)
# Returns abs path relative to this file and not cwd
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
@given('we have appium installed')
def step_impl(context):
desired_caps = {}
desired_caps['platformName'] = 'iOS'
desired_caps['platformVersion'] = '8.4'
desired_caps['deviceName'] = 'iPhone 6'
desired_caps['app'] = PATH('../bin/appium_demo.app.zip')
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
context.driver = driver
@when(u'user runs app, clicks on "{button_name}" button in app')
def step_impl(context, button_name):
WebDriverWait(context.driver, 10).until(
EC.visibility_of_element_located((By.ID, button_name))
)
context.driver.find_element_by_id('Launch').click()
logging.info(context.driver.current_context)
@then(u'user clicks on "{button_name}" button in website')
def step_impl(context, button_name):
time.sleep(3)
context.driver.switch_to.context('WEBVIEW_1')
WebDriverWait(context.driver, 10).until(
EC.visibility_of_element_located((By.ID, button_name))
)
logging.info(context.driver.contexts)
time.sleep(3)
context.driver.find_element_by_id('app-switch').click()
@then(u'it switches back to app')
def step_impl(context):
time.sleep(3)
context.driver.switch_to.context("NATIVE_APP")
logging.info(context.driver.contexts)
assert context.failed is False
|
Add implementation of the behave feature in steps
|
Add implementation of the behave feature in steps
|
Python
|
mit
|
avidas/reliability-demo
|
Add implementation of the behave feature in steps
|
from behave import given, when, then
from appium import webdriver
import os
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support import expected_conditions as EC
import logging
logging.basicConfig(level=logging.DEBUG)
# Returns abs path relative to this file and not cwd
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
@given('we have appium installed')
def step_impl(context):
desired_caps = {}
desired_caps['platformName'] = 'iOS'
desired_caps['platformVersion'] = '8.4'
desired_caps['deviceName'] = 'iPhone 6'
desired_caps['app'] = PATH('../bin/appium_demo.app.zip')
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
context.driver = driver
@when(u'user runs app, clicks on "{button_name}" button in app')
def step_impl(context, button_name):
WebDriverWait(context.driver, 10).until(
EC.visibility_of_element_located((By.ID, button_name))
)
context.driver.find_element_by_id('Launch').click()
logging.info(context.driver.current_context)
@then(u'user clicks on "{button_name}" button in website')
def step_impl(context, button_name):
time.sleep(3)
context.driver.switch_to.context('WEBVIEW_1')
WebDriverWait(context.driver, 10).until(
EC.visibility_of_element_located((By.ID, button_name))
)
logging.info(context.driver.contexts)
time.sleep(3)
context.driver.find_element_by_id('app-switch').click()
@then(u'it switches back to app')
def step_impl(context):
time.sleep(3)
context.driver.switch_to.context("NATIVE_APP")
logging.info(context.driver.contexts)
assert context.failed is False
|
<commit_before><commit_msg>Add implementation of the behave feature in steps<commit_after>
|
from behave import given, when, then
from appium import webdriver
import os
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support import expected_conditions as EC
import logging
logging.basicConfig(level=logging.DEBUG)
# Returns abs path relative to this file and not cwd
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
@given('we have appium installed')
def step_impl(context):
desired_caps = {}
desired_caps['platformName'] = 'iOS'
desired_caps['platformVersion'] = '8.4'
desired_caps['deviceName'] = 'iPhone 6'
desired_caps['app'] = PATH('../bin/appium_demo.app.zip')
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
context.driver = driver
@when(u'user runs app, clicks on "{button_name}" button in app')
def step_impl(context, button_name):
WebDriverWait(context.driver, 10).until(
EC.visibility_of_element_located((By.ID, button_name))
)
context.driver.find_element_by_id('Launch').click()
logging.info(context.driver.current_context)
@then(u'user clicks on "{button_name}" button in website')
def step_impl(context, button_name):
time.sleep(3)
context.driver.switch_to.context('WEBVIEW_1')
WebDriverWait(context.driver, 10).until(
EC.visibility_of_element_located((By.ID, button_name))
)
logging.info(context.driver.contexts)
time.sleep(3)
context.driver.find_element_by_id('app-switch').click()
@then(u'it switches back to app')
def step_impl(context):
time.sleep(3)
context.driver.switch_to.context("NATIVE_APP")
logging.info(context.driver.contexts)
assert context.failed is False
|
Add implementation of the behave feature in stepsfrom behave import given, when, then
from appium import webdriver
import os
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support import expected_conditions as EC
import logging
logging.basicConfig(level=logging.DEBUG)
# Returns abs path relative to this file and not cwd
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
@given('we have appium installed')
def step_impl(context):
desired_caps = {}
desired_caps['platformName'] = 'iOS'
desired_caps['platformVersion'] = '8.4'
desired_caps['deviceName'] = 'iPhone 6'
desired_caps['app'] = PATH('../bin/appium_demo.app.zip')
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
context.driver = driver
@when(u'user runs app, clicks on "{button_name}" button in app')
def step_impl(context, button_name):
WebDriverWait(context.driver, 10).until(
EC.visibility_of_element_located((By.ID, button_name))
)
context.driver.find_element_by_id('Launch').click()
logging.info(context.driver.current_context)
@then(u'user clicks on "{button_name}" button in website')
def step_impl(context, button_name):
time.sleep(3)
context.driver.switch_to.context('WEBVIEW_1')
WebDriverWait(context.driver, 10).until(
EC.visibility_of_element_located((By.ID, button_name))
)
logging.info(context.driver.contexts)
time.sleep(3)
context.driver.find_element_by_id('app-switch').click()
@then(u'it switches back to app')
def step_impl(context):
time.sleep(3)
context.driver.switch_to.context("NATIVE_APP")
logging.info(context.driver.contexts)
assert context.failed is False
|
<commit_before><commit_msg>Add implementation of the behave feature in steps<commit_after>from behave import given, when, then
from appium import webdriver
import os
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support import expected_conditions as EC
import logging
logging.basicConfig(level=logging.DEBUG)
# Returns abs path relative to this file and not cwd
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
@given('we have appium installed')
def step_impl(context):
desired_caps = {}
desired_caps['platformName'] = 'iOS'
desired_caps['platformVersion'] = '8.4'
desired_caps['deviceName'] = 'iPhone 6'
desired_caps['app'] = PATH('../bin/appium_demo.app.zip')
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
context.driver = driver
@when(u'user runs app, clicks on "{button_name}" button in app')
def step_impl(context, button_name):
WebDriverWait(context.driver, 10).until(
EC.visibility_of_element_located((By.ID, button_name))
)
context.driver.find_element_by_id('Launch').click()
logging.info(context.driver.current_context)
@then(u'user clicks on "{button_name}" button in website')
def step_impl(context, button_name):
time.sleep(3)
context.driver.switch_to.context('WEBVIEW_1')
WebDriverWait(context.driver, 10).until(
EC.visibility_of_element_located((By.ID, button_name))
)
logging.info(context.driver.contexts)
time.sleep(3)
context.driver.find_element_by_id('app-switch').click()
@then(u'it switches back to app')
def step_impl(context):
time.sleep(3)
context.driver.switch_to.context("NATIVE_APP")
logging.info(context.driver.contexts)
assert context.failed is False
|
|
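Editor's note: the steps in this record already use WebDriverWait for element visibility but fall back to fixed time.sleep(3) pauses around the context switches, which tends to make such tests flaky. A hedged sketch of polling for the webview context instead follows; the context name 'WEBVIEW_1' and the driver come from the steps above, while the helper name is an assumption.

# Hedged sketch: wait for the webview context to appear before switching.
from selenium.webdriver.support.ui import WebDriverWait

def switch_to_webview(driver, name='WEBVIEW_1', timeout=10):
    # Poll until Appium reports the webview context, then switch to it.
    WebDriverWait(driver, timeout).until(lambda d: name in d.contexts)
    driver.switch_to.context(name)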
4790b1afa5d0125948bb8fdbed6f5838d470a570
|
test/client/local_recognizer_test.py
|
test/client/local_recognizer_test.py
|
import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
|
import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
rl = RecognizerLoop()
self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl,
16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
|
Fix init of local recognizer
|
Fix init of local recognizer
|
Python
|
apache-2.0
|
MycroftAI/mycroft-core,forslund/mycroft-core,aatchison/mycroft-core,Dark5ide/mycroft-core,linuxipho/mycroft-core,forslund/mycroft-core,linuxipho/mycroft-core,MycroftAI/mycroft-core,aatchison/mycroft-core,Dark5ide/mycroft-core
|
import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
Fix init of local recognizer
|
import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
rl = RecognizerLoop()
self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl,
16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
|
<commit_before>import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
<commit_msg>Fix init of local recognizer<commit_after>
|
import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
rl = RecognizerLoop()
self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl,
16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
|
import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
Fix init of local recognizerimport unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
rl = RecognizerLoop()
self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl,
16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
|
<commit_before>import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
<commit_msg>Fix init of local recognizer<commit_after>import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
rl = RecognizerLoop()
self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl,
16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
|
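Editor's note on the recognizer-init fix above: the patch works because, per the diff, create_mycroft_recognizer is an instance method on RecognizerLoop, so it needs a loop object bound as self; passing rl explicitly as the first positional argument supplies that. The short sketch below spells the same thing the more conventional way, by calling the method on the instance. It only reuses names that appear in the patch itself and is an illustrative sketch, not verified against any particular Mycroft release.

import os
import unittest

from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop

DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")


class LocalRecognizerSetupSketch(unittest.TestCase):
    def setUp(self):
        # Equivalent to RecognizerLoop.create_mycroft_recognizer(rl, 16000, "en-us"):
        # build the loop first, then let Python bind it as "self" by calling the
        # method directly on the instance.
        loop = RecognizerLoop()
        self.recognizer = loop.create_mycroft_recognizer(16000, "en-us")

    def test_wake_word_in_sample(self):
        source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
        with source as audio:
            hyp = self.recognizer.transcribe(audio.stream.read())
        self.assertIn(self.recognizer.key_phrase, hyp.hypstr.lower())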
cd3a5bc00444266d417e09bba54230024c810888
|
examples/routes.py
|
examples/routes.py
|
import build.eve_nerd as eve_nerd
def print_route(v):
print((" %s to %s " % (v.points[0].entity.system.name, v.points[-1].entity.system.name)).center(80, "-"))
t = int(v.cost)
print("Estimated travel time: %d:%02d:%02d" % (t / 3600, (t % 3600) / 60, t % 60))
for x in range(len(v.points)):
r = v.points[x]
if r.type == eve_nerd.JUMP:
print("Jump to %s in %s" % (r.entity.name, r.entity.system.name))
elif r.type == eve_nerd.GATE:
print("Gate into %s" % (r.entity.system.name))
elif r.type == eve_nerd.WARP:
print("Warp to %s" % (r.entity.name))
elif r.type == eve_nerd.STRT:
print("Start at %s in %s" % (r.entity.name, r.entity.system.name))
print("")
# Load the universe
a = eve_nerd.Universe("mapDenormalize.csv", "mapJumps.csv")
# Go from D-P to G-M using a supercarrier preset
b = a.get_route(30003135, 30004696, eve_nerd.SUPERCARRIER)
print_route(b)
# Go from D-P to Period Basis using a Black Ops
b = a.get_route(30003135, 30004955, eve_nerd.BLACK_OPS)
print_route(b)
b = a.get_route(30003135, 30003632, eve_nerd.CARRIER)
print_route(b)
a.add_dynamic_bridge(40199046, 6.0)
b = a.get_route(30003135, 30004955, eve_nerd.BATTLECRUISER)
print_route(b)
a.add_static_bridge(40297263, 40297596)
b = a.get_route(30003135, 30004704, eve_nerd.BATTLECRUISER)
print_route(b)
c = eve_nerd.Parameters(8.0, 1.5, 10.0)
b = a.get_route(30003135, 30004704, c)
print_route(b)
|
Add an example (with wrong import path).
|
Add an example (with wrong import path).
|
Python
|
mit
|
StephenSwat/new-eden-route-derivation,StephenSwat/new-eden-route-derivation
|
Add an example (with wrong import path).
|
import build.eve_nerd as eve_nerd
def print_route(v):
print((" %s to %s " % (v.points[0].entity.system.name, v.points[-1].entity.system.name)).center(80, "-"))
t = int(v.cost)
print("Estimated travel time: %d:%02d:%02d" % (t / 3600, (t % 3600) / 60, t % 60))
for x in range(len(v.points)):
r = v.points[x]
if r.type == eve_nerd.JUMP:
print("Jump to %s in %s" % (r.entity.name, r.entity.system.name))
elif r.type == eve_nerd.GATE:
print("Gate into %s" % (r.entity.system.name))
elif r.type == eve_nerd.WARP:
print("Warp to %s" % (r.entity.name))
elif r.type == eve_nerd.STRT:
print("Start at %s in %s" % (r.entity.name, r.entity.system.name))
print("")
# Load the universe
a = eve_nerd.Universe("mapDenormalize.csv", "mapJumps.csv")
# Go from D-P to G-M using a supercarrier preset
b = a.get_route(30003135, 30004696, eve_nerd.SUPERCARRIER)
print_route(b)
# Go from D-P to Period Basis using a Black Ops
b = a.get_route(30003135, 30004955, eve_nerd.BLACK_OPS)
print_route(b)
b = a.get_route(30003135, 30003632, eve_nerd.CARRIER)
print_route(b)
a.add_dynamic_bridge(40199046, 6.0)
b = a.get_route(30003135, 30004955, eve_nerd.BATTLECRUISER)
print_route(b)
a.add_static_bridge(40297263, 40297596)
b = a.get_route(30003135, 30004704, eve_nerd.BATTLECRUISER)
print_route(b)
c = eve_nerd.Parameters(8.0, 1.5, 10.0)
b = a.get_route(30003135, 30004704, c)
print_route(b)
|
<commit_before><commit_msg>Add an example (with wrong import path).<commit_after>
|
import build.eve_nerd as eve_nerd
def print_route(v):
print((" %s to %s " % (v.points[0].entity.system.name, v.points[-1].entity.system.name)).center(80, "-"))
t = int(v.cost)
print("Estimated travel time: %d:%02d:%02d" % (t / 3600, (t % 3600) / 60, t % 60))
for x in range(len(v.points)):
r = v.points[x]
if r.type == eve_nerd.JUMP:
print("Jump to %s in %s" % (r.entity.name, r.entity.system.name))
elif r.type == eve_nerd.GATE:
print("Gate into %s" % (r.entity.system.name))
elif r.type == eve_nerd.WARP:
print("Warp to %s" % (r.entity.name))
elif r.type == eve_nerd.STRT:
print("Start at %s in %s" % (r.entity.name, r.entity.system.name))
print("")
# Load the universe
a = eve_nerd.Universe("mapDenormalize.csv", "mapJumps.csv")
# Go from D-P to G-M using a supercarrier preset
b = a.get_route(30003135, 30004696, eve_nerd.SUPERCARRIER)
print_route(b)
# Go from D-P to Period Basis using a Black Ops
b = a.get_route(30003135, 30004955, eve_nerd.BLACK_OPS)
print_route(b)
b = a.get_route(30003135, 30003632, eve_nerd.CARRIER)
print_route(b)
a.add_dynamic_bridge(40199046, 6.0)
b = a.get_route(30003135, 30004955, eve_nerd.BATTLECRUISER)
print_route(b)
a.add_static_bridge(40297263, 40297596)
b = a.get_route(30003135, 30004704, eve_nerd.BATTLECRUISER)
print_route(b)
c = eve_nerd.Parameters(8.0, 1.5, 10.0)
b = a.get_route(30003135, 30004704, c)
print_route(b)
|
Add an example (with wrong import path).import build.eve_nerd as eve_nerd
def print_route(v):
print((" %s to %s " % (v.points[0].entity.system.name, v.points[-1].entity.system.name)).center(80, "-"))
t = int(v.cost)
print("Estimated travel time: %d:%02d:%02d" % (t / 3600, (t % 3600) / 60, t % 60))
for x in range(len(v.points)):
r = v.points[x]
if r.type == eve_nerd.JUMP:
print("Jump to %s in %s" % (r.entity.name, r.entity.system.name))
elif r.type == eve_nerd.GATE:
print("Gate into %s" % (r.entity.system.name))
elif r.type == eve_nerd.WARP:
print("Warp to %s" % (r.entity.name))
elif r.type == eve_nerd.STRT:
print("Start at %s in %s" % (r.entity.name, r.entity.system.name))
print("")
# Load the universe
a = eve_nerd.Universe("mapDenormalize.csv", "mapJumps.csv")
# Go from D-P to G-M using a supercarrier preset
b = a.get_route(30003135, 30004696, eve_nerd.SUPERCARRIER)
print_route(b)
# Go from D-P to Period Basis using a Black Ops
b = a.get_route(30003135, 30004955, eve_nerd.BLACK_OPS)
print_route(b)
b = a.get_route(30003135, 30003632, eve_nerd.CARRIER)
print_route(b)
a.add_dynamic_bridge(40199046, 6.0)
b = a.get_route(30003135, 30004955, eve_nerd.BATTLECRUISER)
print_route(b)
a.add_static_bridge(40297263, 40297596)
b = a.get_route(30003135, 30004704, eve_nerd.BATTLECRUISER)
print_route(b)
c = eve_nerd.Parameters(8.0, 1.5, 10.0)
b = a.get_route(30003135, 30004704, c)
print_route(b)
|
<commit_before><commit_msg>Add an example (with wrong import path).<commit_after>import build.eve_nerd as eve_nerd
def print_route(v):
print((" %s to %s " % (v.points[0].entity.system.name, v.points[-1].entity.system.name)).center(80, "-"))
t = int(v.cost)
print("Estimated travel time: %d:%02d:%02d" % (t / 3600, (t % 3600) / 60, t % 60))
for x in range(len(v.points)):
r = v.points[x]
if r.type == eve_nerd.JUMP:
print("Jump to %s in %s" % (r.entity.name, r.entity.system.name))
elif r.type == eve_nerd.GATE:
print("Gate into %s" % (r.entity.system.name))
elif r.type == eve_nerd.WARP:
print("Warp to %s" % (r.entity.name))
elif r.type == eve_nerd.STRT:
print("Start at %s in %s" % (r.entity.name, r.entity.system.name))
print("")
# Load the universe
a = eve_nerd.Universe("mapDenormalize.csv", "mapJumps.csv")
# Go from D-P to G-M using a supercarrier preset
b = a.get_route(30003135, 30004696, eve_nerd.SUPERCARRIER)
print_route(b)
# Go from D-P to Period Basis using a Black Ops
b = a.get_route(30003135, 30004955, eve_nerd.BLACK_OPS)
print_route(b)
b = a.get_route(30003135, 30003632, eve_nerd.CARRIER)
print_route(b)
a.add_dynamic_bridge(40199046, 6.0)
b = a.get_route(30003135, 30004955, eve_nerd.BATTLECRUISER)
print_route(b)
a.add_static_bridge(40297263, 40297596)
b = a.get_route(30003135, 30004704, eve_nerd.BATTLECRUISER)
print_route(b)
c = eve_nerd.Parameters(8.0, 1.5, 10.0)
b = a.get_route(30003135, 30004704, c)
print_route(b)
|
|
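Editor's note on the example above: the subject line itself admits the import path is wrong, since import build.eve_nerd only resolves when the script is launched from the repository root with the compiled extension sitting in build/. A tolerant loader like the sketch below tries a couple of plausible locations before failing; the candidate module names are assumptions about where the extension might live, not a documented layout.

import importlib


def load_eve_nerd():
    """Return the eve_nerd extension from whichever assumed location imports cleanly."""
    for name in ("build.eve_nerd", "eve_nerd"):
        try:
            return importlib.import_module(name)
        except ImportError:
            continue
    raise ImportError("eve_nerd extension not found; build it or adjust sys.path")


eve_nerd = load_eve_nerd()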
17fc6d0c117799345bb8ddbbea732fa6c4ff309d
|
test/backend/test_job.py
|
test/backend/test_job.py
|
import unittest
import modloop
import saliweb.test
import saliweb.backend
import os
class JobTests(saliweb.test.TestCase):
"""Check custom ModLoop Job class"""
def test_run_sanity_check(self):
"""Test sanity checking in run method"""
j = self.make_test_job(modloop.Job, 'RUNNING')
d = saliweb.test.RunInDir(j.directory)
# Invalid characters in loops.tsv
open('loops.tsv', 'w').write('1\t%\t5\tA\t')
self.assertRaises(saliweb.backend.SanityError, j.run)
# Wrong number of fields in loops.tsv
open('loops.tsv', 'w').write('1\tA')
self.assertRaises(saliweb.backend.SanityError, j.run)
def test_run_ok(self):
"""Test successful run method"""
j = self.make_test_job(modloop.Job, 'RUNNING')
d = saliweb.test.RunInDir(j.directory)
open('loops.tsv', 'w').write('1\tA\t5\tA')
cls = j.run()
self.assert_(isinstance(cls, saliweb.backend.SGERunner),
"SGERunner not returned")
os.unlink('loop.py')
def test_postprocess_no_models(self):
"""Test postprocess method; no models produced"""
j = self.make_test_job(modloop.Job, 'POSTPROCESSING')
d = saliweb.test.RunInDir(j.directory)
j.postprocess()
self.assertFalse(os.path.exists('output.pdb'))
def test_postprocess_models(self):
"""Test postprocess method; some models produced"""
j = self.make_test_job(modloop.Job, 'POSTPROCESSING')
d = saliweb.test.RunInDir(j.directory)
print >> open('loop.BL0.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: 309.6122"
print >> open('loop.BL1.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: -457.3816"
print >> open('ignored.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: -900.3816"
open('loops.tsv', 'w').write('1\tA\t5\tA')
j.postprocess()
os.unlink('output.pdb')
os.unlink('output-pdbs.tar.bz2')
os.unlink('ignored.pdb')
self.assertFalse(os.path.exists('loop.BL0.pdb'))
self.assertFalse(os.path.exists('loop.BL1.pdb'))
if __name__ == '__main__':
unittest.main()
|
Test ModLoop's custom Job class.
|
Test ModLoop's custom Job class.
|
Python
|
lgpl-2.1
|
salilab/modloop,salilab/modloop
|
Test ModLoop's custom Job class.
|
import unittest
import modloop
import saliweb.test
import saliweb.backend
import os
class JobTests(saliweb.test.TestCase):
"""Check custom ModLoop Job class"""
def test_run_sanity_check(self):
"""Test sanity checking in run method"""
j = self.make_test_job(modloop.Job, 'RUNNING')
d = saliweb.test.RunInDir(j.directory)
# Invalid characters in loops.tsv
open('loops.tsv', 'w').write('1\t%\t5\tA\t')
self.assertRaises(saliweb.backend.SanityError, j.run)
# Wrong number of fields in loops.tsv
open('loops.tsv', 'w').write('1\tA')
self.assertRaises(saliweb.backend.SanityError, j.run)
def test_run_ok(self):
"""Test successful run method"""
j = self.make_test_job(modloop.Job, 'RUNNING')
d = saliweb.test.RunInDir(j.directory)
open('loops.tsv', 'w').write('1\tA\t5\tA')
cls = j.run()
self.assert_(isinstance(cls, saliweb.backend.SGERunner),
"SGERunner not returned")
os.unlink('loop.py')
def test_postprocess_no_models(self):
"""Test postprocess method; no models produced"""
j = self.make_test_job(modloop.Job, 'POSTPROCESSING')
d = saliweb.test.RunInDir(j.directory)
j.postprocess()
self.assertFalse(os.path.exists('output.pdb'))
def test_postprocess_models(self):
"""Test postprocess method; some models produced"""
j = self.make_test_job(modloop.Job, 'POSTPROCESSING')
d = saliweb.test.RunInDir(j.directory)
print >> open('loop.BL0.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: 309.6122"
print >> open('loop.BL1.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: -457.3816"
print >> open('ignored.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: -900.3816"
open('loops.tsv', 'w').write('1\tA\t5\tA')
j.postprocess()
os.unlink('output.pdb')
os.unlink('output-pdbs.tar.bz2')
os.unlink('ignored.pdb')
self.assertFalse(os.path.exists('loop.BL0.pdb'))
self.assertFalse(os.path.exists('loop.BL1.pdb'))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test ModLoop's custom Job class.<commit_after>
|
import unittest
import modloop
import saliweb.test
import saliweb.backend
import os
class JobTests(saliweb.test.TestCase):
"""Check custom ModLoop Job class"""
def test_run_sanity_check(self):
"""Test sanity checking in run method"""
j = self.make_test_job(modloop.Job, 'RUNNING')
d = saliweb.test.RunInDir(j.directory)
# Invalid characters in loops.tsv
open('loops.tsv', 'w').write('1\t%\t5\tA\t')
self.assertRaises(saliweb.backend.SanityError, j.run)
# Wrong number of fields in loops.tsv
open('loops.tsv', 'w').write('1\tA')
self.assertRaises(saliweb.backend.SanityError, j.run)
def test_run_ok(self):
"""Test successful run method"""
j = self.make_test_job(modloop.Job, 'RUNNING')
d = saliweb.test.RunInDir(j.directory)
open('loops.tsv', 'w').write('1\tA\t5\tA')
cls = j.run()
self.assert_(isinstance(cls, saliweb.backend.SGERunner),
"SGERunner not returned")
os.unlink('loop.py')
def test_postprocess_no_models(self):
"""Test postprocess method; no models produced"""
j = self.make_test_job(modloop.Job, 'POSTPROCESSING')
d = saliweb.test.RunInDir(j.directory)
j.postprocess()
self.assertFalse(os.path.exists('output.pdb'))
def test_postprocess_models(self):
"""Test postprocess method; some models produced"""
j = self.make_test_job(modloop.Job, 'POSTPROCESSING')
d = saliweb.test.RunInDir(j.directory)
print >> open('loop.BL0.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: 309.6122"
print >> open('loop.BL1.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: -457.3816"
print >> open('ignored.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: -900.3816"
open('loops.tsv', 'w').write('1\tA\t5\tA')
j.postprocess()
os.unlink('output.pdb')
os.unlink('output-pdbs.tar.bz2')
os.unlink('ignored.pdb')
self.assertFalse(os.path.exists('loop.BL0.pdb'))
self.assertFalse(os.path.exists('loop.BL1.pdb'))
if __name__ == '__main__':
unittest.main()
|
Test ModLoop's custom Job class.import unittest
import modloop
import saliweb.test
import saliweb.backend
import os
class JobTests(saliweb.test.TestCase):
"""Check custom ModLoop Job class"""
def test_run_sanity_check(self):
"""Test sanity checking in run method"""
j = self.make_test_job(modloop.Job, 'RUNNING')
d = saliweb.test.RunInDir(j.directory)
# Invalid characters in loops.tsv
open('loops.tsv', 'w').write('1\t%\t5\tA\t')
self.assertRaises(saliweb.backend.SanityError, j.run)
# Wrong number of fields in loops.tsv
open('loops.tsv', 'w').write('1\tA')
self.assertRaises(saliweb.backend.SanityError, j.run)
def test_run_ok(self):
"""Test successful run method"""
j = self.make_test_job(modloop.Job, 'RUNNING')
d = saliweb.test.RunInDir(j.directory)
open('loops.tsv', 'w').write('1\tA\t5\tA')
cls = j.run()
self.assert_(isinstance(cls, saliweb.backend.SGERunner),
"SGERunner not returned")
os.unlink('loop.py')
def test_postprocess_no_models(self):
"""Test postprocess method; no models produced"""
j = self.make_test_job(modloop.Job, 'POSTPROCESSING')
d = saliweb.test.RunInDir(j.directory)
j.postprocess()
self.assertFalse(os.path.exists('output.pdb'))
def test_postprocess_models(self):
"""Test postprocess method; some models produced"""
j = self.make_test_job(modloop.Job, 'POSTPROCESSING')
d = saliweb.test.RunInDir(j.directory)
print >> open('loop.BL0.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: 309.6122"
print >> open('loop.BL1.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: -457.3816"
print >> open('ignored.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: -900.3816"
open('loops.tsv', 'w').write('1\tA\t5\tA')
j.postprocess()
os.unlink('output.pdb')
os.unlink('output-pdbs.tar.bz2')
os.unlink('ignored.pdb')
self.assertFalse(os.path.exists('loop.BL0.pdb'))
self.assertFalse(os.path.exists('loop.BL1.pdb'))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test ModLoop's custom Job class.<commit_after>import unittest
import modloop
import saliweb.test
import saliweb.backend
import os
class JobTests(saliweb.test.TestCase):
"""Check custom ModLoop Job class"""
def test_run_sanity_check(self):
"""Test sanity checking in run method"""
j = self.make_test_job(modloop.Job, 'RUNNING')
d = saliweb.test.RunInDir(j.directory)
# Invalid characters in loops.tsv
open('loops.tsv', 'w').write('1\t%\t5\tA\t')
self.assertRaises(saliweb.backend.SanityError, j.run)
# Wrong number of fields in loops.tsv
open('loops.tsv', 'w').write('1\tA')
self.assertRaises(saliweb.backend.SanityError, j.run)
def test_run_ok(self):
"""Test successful run method"""
j = self.make_test_job(modloop.Job, 'RUNNING')
d = saliweb.test.RunInDir(j.directory)
open('loops.tsv', 'w').write('1\tA\t5\tA')
cls = j.run()
self.assert_(isinstance(cls, saliweb.backend.SGERunner),
"SGERunner not returned")
os.unlink('loop.py')
def test_postprocess_no_models(self):
"""Test postprocess method; no models produced"""
j = self.make_test_job(modloop.Job, 'POSTPROCESSING')
d = saliweb.test.RunInDir(j.directory)
j.postprocess()
self.assertFalse(os.path.exists('output.pdb'))
def test_postprocess_models(self):
"""Test postprocess method; some models produced"""
j = self.make_test_job(modloop.Job, 'POSTPROCESSING')
d = saliweb.test.RunInDir(j.directory)
print >> open('loop.BL0.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: 309.6122"
print >> open('loop.BL1.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: -457.3816"
print >> open('ignored.pdb', 'w'), \
"REMARK 1 MODELLER OBJECTIVE FUNCTION: -900.3816"
open('loops.tsv', 'w').write('1\tA\t5\tA')
j.postprocess()
os.unlink('output.pdb')
os.unlink('output-pdbs.tar.bz2')
os.unlink('ignored.pdb')
self.assertFalse(os.path.exists('loop.BL0.pdb'))
self.assertFalse(os.path.exists('loop.BL1.pdb'))
if __name__ == '__main__':
unittest.main()
|
|
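Editor's note on the fixtures in test_postprocess_models above: the print >> open(...) statements are Python 2 syntax for writing one-line stub PDB files whose only payload is a MODELLER objective-function score. If the test were ported forward, the same fixtures could be produced with a small helper like the one sketched below; the REMARK line mirrors the one in the test, while the helper name and its use of %s formatting are assumptions made for illustration.

def write_stub_model(path, objective_function):
    """Write a one-line fake MODELLER model carrying only an objective function score."""
    with open(path, "w") as fh:
        fh.write("REMARK 1 MODELLER OBJECTIVE FUNCTION: %s\n" % objective_function)


# Fixtures equivalent to the ones created inline in the test:
write_stub_model("loop.BL0.pdb", 309.6122)
write_stub_model("loop.BL1.pdb", -457.3816)
write_stub_model("ignored.pdb", -900.3816)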
1038f39191ae5dad0d91f7b8a89411c51ad028d0
|
ncclient/devices/junos.py
|
ncclient/devices/junos.py
|
"""
Handler for Juniper Junos device specific information.
Note that for proper import, the classname has to be:
"<Devicename>DeviceHandler"
...where <Devicename> is something like "Default", "Nexus", etc.
All device-specific handlers derive from the DefaultDeviceHandler, which implements the
generic information needed for interaction with a Netconf server.
"""
from .default import DefaultDeviceHandler
from ncclient.operations.third_party.juniper.rpc import GetConfiguration, LoadConfiguration, CompareConfiguration
from ncclient.operations.third_party.juniper.rpc import ExecuteRpc, Command, Reboot, Halt
class JunosDeviceHandler(DefaultDeviceHandler):
"""
Juniper handler for device specific information.
"""
def __init__(self, device_params):
super(JunosDeviceHandler, self).__init__(device_params)
def add_additional_operations(self):
dict = {}
dict["rpc"] = ExecuteRpc
dict["get_configuration"] = GetConfiguration
dict["load_configuration"] = LoadConfiguration
dict["compare_configuration"] = CompareConfiguration
dict["command"] = Command
dict["reboot"] = Reboot
dict["halt"] = Halt
return dict
def perform_qualify_check(self):
return False
|
Implement extra Juniper operations in devices module
|
Implement extra Juniper operations in devices module
|
Python
|
apache-2.0
|
sebastianw/ncclient,lightlu/ncclient,aitorhh/ncclient,OpenClovis/ncclient,katharh/ncclient,nnakamot/ncclient,ncclient/ncclient,earies/ncclient,vnitinv/ncclient,einarnn/ncclient,kroustou/ncclient,nwautomator/ncclient,cmoberg/ncclient,GIC-de/ncclient,leopoul/ncclient,joysboy/ncclient
|
Implement extra Juniper operations in devices module
|
"""
Handler for Juniper Junos device specific information.
Note that for proper import, the classname has to be:
"<Devicename>DeviceHandler"
...where <Devicename> is something like "Default", "Nexus", etc.
All device-specific handlers derive from the DefaultDeviceHandler, which implements the
generic information needed for interaction with a Netconf server.
"""
from .default import DefaultDeviceHandler
from ncclient.operations.third_party.juniper.rpc import GetConfiguration, LoadConfiguration, CompareConfiguration
from ncclient.operations.third_party.juniper.rpc import ExecuteRpc, Command, Reboot, Halt
class JunosDeviceHandler(DefaultDeviceHandler):
"""
Juniper handler for device specific information.
"""
def __init__(self, device_params):
super(JunosDeviceHandler, self).__init__(device_params)
def add_additional_operations(self):
dict = {}
dict["rpc"] = ExecuteRpc
dict["get_configuration"] = GetConfiguration
dict["load_configuration"] = LoadConfiguration
dict["compare_configuration"] = CompareConfiguration
dict["command"] = Command
dict["reboot"] = Reboot
dict["halt"] = Halt
return dict
def perform_qualify_check(self):
return False
|
<commit_before><commit_msg>Implement extra Juniper operations in devices module<commit_after>
|
"""
Handler for Juniper Junos device specific information.
Note that for proper import, the classname has to be:
"<Devicename>DeviceHandler"
...where <Devicename> is something like "Default", "Nexus", etc.
All device-specific handlers derive from the DefaultDeviceHandler, which implements the
generic information needed for interaction with a Netconf server.
"""
from .default import DefaultDeviceHandler
from ncclient.operations.third_party.juniper.rpc import GetConfiguration, LoadConfiguration, CompareConfiguration
from ncclient.operations.third_party.juniper.rpc import ExecuteRpc, Command, Reboot, Halt
class JunosDeviceHandler(DefaultDeviceHandler):
"""
Juniper handler for device specific information.
"""
def __init__(self, device_params):
super(JunosDeviceHandler, self).__init__(device_params)
def add_additional_operations(self):
dict = {}
dict["rpc"] = ExecuteRpc
dict["get_configuration"] = GetConfiguration
dict["load_configuration"] = LoadConfiguration
dict["compare_configuration"] = CompareConfiguration
dict["command"] = Command
dict["reboot"] = Reboot
dict["halt"] = Halt
return dict
def perform_qualify_check(self):
return False
|
Implement extra Juniper operations in devices module"""
Handler for Juniper Junos device specific information.
Note that for proper import, the classname has to be:
"<Devicename>DeviceHandler"
...where <Devicename> is something like "Default", "Nexus", etc.
All device-specific handlers derive from the DefaultDeviceHandler, which implements the
generic information needed for interaction with a Netconf server.
"""
from .default import DefaultDeviceHandler
from ncclient.operations.third_party.juniper.rpc import GetConfiguration, LoadConfiguration, CompareConfiguration
from ncclient.operations.third_party.juniper.rpc import ExecuteRpc, Command, Reboot, Halt
class JunosDeviceHandler(DefaultDeviceHandler):
"""
Juniper handler for device specific information.
"""
def __init__(self, device_params):
super(JunosDeviceHandler, self).__init__(device_params)
def add_additional_operations(self):
dict = {}
dict["rpc"] = ExecuteRpc
dict["get_configuration"] = GetConfiguration
dict["load_configuration"] = LoadConfiguration
dict["compare_configuration"] = CompareConfiguration
dict["command"] = Command
dict["reboot"] = Reboot
dict["halt"] = Halt
return dict
def perform_qualify_check(self):
return False
|
<commit_before><commit_msg>Implement extra Juniper operations in devices module<commit_after>"""
Handler for Juniper Junos device specific information.
Note that for proper import, the classname has to be:
"<Devicename>DeviceHandler"
...where <Devicename> is something like "Default", "Nexus", etc.
All device-specific handlers derive from the DefaultDeviceHandler, which implements the
generic information needed for interaction with a Netconf server.
"""
from .default import DefaultDeviceHandler
from ncclient.operations.third_party.juniper.rpc import GetConfiguration, LoadConfiguration, CompareConfiguration
from ncclient.operations.third_party.juniper.rpc import ExecuteRpc, Command, Reboot, Halt
class JunosDeviceHandler(DefaultDeviceHandler):
"""
Juniper handler for device specific information.
"""
def __init__(self, device_params):
super(JunosDeviceHandler, self).__init__(device_params)
def add_additional_operations(self):
dict = {}
dict["rpc"] = ExecuteRpc
dict["get_configuration"] = GetConfiguration
dict["load_configuration"] = LoadConfiguration
dict["compare_configuration"] = CompareConfiguration
dict["command"] = Command
dict["reboot"] = Reboot
dict["halt"] = Halt
return dict
def perform_qualify_check(self):
return False
|
|
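Editor's note on add_additional_operations above: the local variable is named dict, which shadows the built-in type inside the method. A dictionary literal keeps the operation-name-to-class mapping in one place without that shadowing; the sketch below is a stylistic alternative only, and it assumes the imports at the top of junos.py (ExecuteRpc, GetConfiguration, and the rest) are in scope.

class JunosDeviceHandlerSketch(JunosDeviceHandler):
    def add_additional_operations(self):
        # Same mapping as in the handler above, expressed as a literal so the
        # built-in dict() is never shadowed.
        return {
            "rpc": ExecuteRpc,
            "get_configuration": GetConfiguration,
            "load_configuration": LoadConfiguration,
            "compare_configuration": CompareConfiguration,
            "command": Command,
            "reboot": Reboot,
            "halt": Halt,
        }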
f4026b34f97c4e42a2229d47e778fbe09b351eb1
|
tools/tabulate_events.py
|
tools/tabulate_events.py
|
#!/usr/bin/env python
# note: must be invoked from the top-level sts directory
import time
import argparse
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from sts.replay_event import *
from sts.dataplane_traces.trace import Trace
from sts.input_traces.log_parser import parse
from tools.pretty_print_input_trace import default_fields, field_formatters
class EventGrouping(object):
def __init__(self, name):
self.name = name
self.events = []
def append(self, event):
self.events.append(event)
def printToConsole(self):
# TODO(cs): bad feng-shui to have side-effects rather than returning a
# string. Should refactor field_formatters to not have side-effects
# either.
title_str = "====================== %s ======================" % self.name
print title_str
for event in self.events:
for field in default_fields:
field_formatters[field](event)
print "=" * len(title_str)
def main(args):
# N.B. it would be nice to include link discovery or host location discovery
# events here, but that's specific to the Controller's log output.
network_failure_events = EventGrouping("Topology Change Events")
controlplane_failure_events = EventGrouping("Control Plane Blockages")
controller_failure_events = EventGrouping("Controller Change Events")
host_events = EventGrouping("Host Migrations")
event2grouping = {
SwitchFailure : network_failure_events,
SwitchRecovery : network_failure_events,
LinkFailure : network_failure_events,
LinkRecovery : network_failure_events,
ControlChannelBlock : controlplane_failure_events,
ControlChannelUnblock : controlplane_failure_events,
ControllerFailure : controller_failure_events,
ControllerRecovery : controller_failure_events,
BlockControllerPair : controller_failure_events,
UnblockControllerPair : controller_failure_events,
HostMigration : host_events,
# TODO(cs): support TrafficInjection, DataplaneDrop? Might get too noisy.
}
with open(args.input) as input_file:
trace = parse(input_file)
for event in trace:
if type(event) in event2grouping:
event2grouping[type(event)].append(event)
for grouping in [network_failure_events, controlplane_failure_events,
controller_failure_events, host_events]:
grouping.printToConsole()
print
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input', metavar="INPUT",
help='The input json file to be printed')
args = parser.parse_args()
main(args)
|
Add simple tool for tabulating classes of event types for readability
|
Add simple tool for tabulating classes of event types for readability
|
Python
|
apache-2.0
|
jmiserez/sts,jmiserez/sts,ucb-sts/sts,ucb-sts/sts
|
Add simple tool for tabulating classes of event types for readability
|
#!/usr/bin/env python
# note: must be invoked from the top-level sts directory
import time
import argparse
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from sts.replay_event import *
from sts.dataplane_traces.trace import Trace
from sts.input_traces.log_parser import parse
from tools.pretty_print_input_trace import default_fields, field_formatters
class EventGrouping(object):
def __init__(self, name):
self.name = name
self.events = []
def append(self, event):
self.events.append(event)
def printToConsole(self):
# TODO(cs): bad feng-shui to have side-effects rather than returning a
# string. Should refactor field_formatters to not have side-effects
# either.
title_str = "====================== %s ======================" % self.name
print title_str
for event in self.events:
for field in default_fields:
field_formatters[field](event)
print "=" * len(title_str)
def main(args):
# N.B. it would be nice to include link discovery or host location discovery
# events here, but that's specific to the Controller's log output.
network_failure_events = EventGrouping("Topology Change Events")
controlplane_failure_events = EventGrouping("Control Plane Blockages")
controller_failure_events = EventGrouping("Controller Change Events")
host_events = EventGrouping("Host Migrations")
event2grouping = {
SwitchFailure : network_failure_events,
SwitchRecovery : network_failure_events,
LinkFailure : network_failure_events,
LinkRecovery : network_failure_events,
ControlChannelBlock : controlplane_failure_events,
ControlChannelUnblock : controlplane_failure_events,
ControllerFailure : controller_failure_events,
ControllerRecovery : controller_failure_events,
BlockControllerPair : controller_failure_events,
UnblockControllerPair : controller_failure_events,
HostMigration : host_events,
# TODO(cs): support TrafficInjection, DataplaneDrop? Might get too noisy.
}
with open(args.input) as input_file:
trace = parse(input_file)
for event in trace:
if type(event) in event2grouping:
event2grouping[type(event)].append(event)
for grouping in [network_failure_events, controlplane_failure_events,
controller_failure_events, host_events]:
grouping.printToConsole()
print
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input', metavar="INPUT",
help='The input json file to be printed')
args = parser.parse_args()
main(args)
|
<commit_before><commit_msg>Add simple tool for tabulating classes of event types for readability<commit_after>
|
#!/usr/bin/env python
# note: must be invoked from the top-level sts directory
import time
import argparse
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from sts.replay_event import *
from sts.dataplane_traces.trace import Trace
from sts.input_traces.log_parser import parse
from tools.pretty_print_input_trace import default_fields, field_formatters
class EventGrouping(object):
def __init__(self, name):
self.name = name
self.events = []
def append(self, event):
self.events.append(event)
def printToConsole(self):
# TODO(cs): bad feng-shui to have side-effects rather than returning a
# string. Should refactor field_formatters to not have side-effects
# either.
title_str = "====================== %s ======================" % self.name
print title_str
for event in self.events:
for field in default_fields:
field_formatters[field](event)
print "=" * len(title_str)
def main(args):
# N.B. it would be nice to include link discovery or host location discovery
# events here, but that's specific to the Controller's log output.
network_failure_events = EventGrouping("Topology Change Events")
controlplane_failure_events = EventGrouping("Control Plane Blockages")
controller_failure_events = EventGrouping("Controller Change Events")
host_events = EventGrouping("Host Migrations")
event2grouping = {
SwitchFailure : network_failure_events,
SwitchRecovery : network_failure_events,
LinkFailure : network_failure_events,
LinkRecovery : network_failure_events,
ControlChannelBlock : controlplane_failure_events,
ControlChannelUnblock : controlplane_failure_events,
ControllerFailure : controller_failure_events,
ControllerRecovery : controller_failure_events,
BlockControllerPair : controller_failure_events,
UnblockControllerPair : controller_failure_events,
HostMigration : host_events,
# TODO(cs): support TrafficInjection, DataplaneDrop? Might get too noisy.
}
with open(args.input) as input_file:
trace = parse(input_file)
for event in trace:
if type(event) in event2grouping:
event2grouping[type(event)].append(event)
for grouping in [network_failure_events, controlplane_failure_events,
controller_failure_events, host_events]:
grouping.printToConsole()
print
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input', metavar="INPUT",
help='The input json file to be printed')
args = parser.parse_args()
main(args)
|
Add simple tool for tabulating classes of event types for readability#!/usr/bin/env python
# note: must be invoked from the top-level sts directory
import time
import argparse
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from sts.replay_event import *
from sts.dataplane_traces.trace import Trace
from sts.input_traces.log_parser import parse
from tools.pretty_print_input_trace import default_fields, field_formatters
class EventGrouping(object):
def __init__(self, name):
self.name = name
self.events = []
def append(self, event):
self.events.append(event)
def printToConsole(self):
# TODO(cs): bad feng-shui to have side-effects rather than returning a
# string. Should refactor field_formatters to not have side-effects
# either.
title_str = "====================== %s ======================" % self.name
print title_str
for event in self.events:
for field in default_fields:
field_formatters[field](event)
print "=" * len(title_str)
def main(args):
# N.B. it would be nice to include link discovery or host location discovery
# events here, but that's specific to the Controller's log output.
network_failure_events = EventGrouping("Topology Change Events")
controlplane_failure_events = EventGrouping("Control Plane Blockages")
controller_failure_events = EventGrouping("Controller Change Events")
host_events = EventGrouping("Host Migrations")
event2grouping = {
SwitchFailure : network_failure_events,
SwitchRecovery : network_failure_events,
LinkFailure : network_failure_events,
LinkRecovery : network_failure_events,
ControlChannelBlock : controlplane_failure_events,
ControlChannelUnblock : controlplane_failure_events,
ControllerFailure : controller_failure_events,
ControllerRecovery : controller_failure_events,
BlockControllerPair : controller_failure_events,
UnblockControllerPair : controller_failure_events,
HostMigration : host_events,
# TODO(cs): support TrafficInjection, DataplaneDrop? Might get too noisy.
}
with open(args.input) as input_file:
trace = parse(input_file)
for event in trace:
if type(event) in event2grouping:
event2grouping[type(event)].append(event)
for grouping in [network_failure_events, controlplane_failure_events,
controller_failure_events, host_events]:
grouping.printToConsole()
print
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input', metavar="INPUT",
help='The input json file to be printed')
args = parser.parse_args()
main(args)
|
<commit_before><commit_msg>Add simple tool for tabulating classes of event types for readability<commit_after>#!/usr/bin/env python
# note: must be invoked from the top-level sts directory
import time
import argparse
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from sts.replay_event import *
from sts.dataplane_traces.trace import Trace
from sts.input_traces.log_parser import parse
from tools.pretty_print_input_trace import default_fields, field_formatters
class EventGrouping(object):
def __init__(self, name):
self.name = name
self.events = []
def append(self, event):
self.events.append(event)
def printToConsole(self):
# TODO(cs): bad feng-shui to have side-effects rather than returning a
# string. Should refactor field_formatters to not have side-effects
# either.
title_str = "====================== %s ======================" % self.name
print title_str
for event in self.events:
for field in default_fields:
field_formatters[field](event)
print "=" * len(title_str)
def main(args):
# N.B. it would be nice to include link discovery or host location discovery
# events here, but that's specific to the Controller's log output.
network_failure_events = EventGrouping("Topology Change Events")
controlplane_failure_events = EventGrouping("Control Plane Blockages")
controller_failure_events = EventGrouping("Controller Change Events")
host_events = EventGrouping("Host Migrations")
event2grouping = {
SwitchFailure : network_failure_events,
SwitchRecovery : network_failure_events,
LinkFailure : network_failure_events,
LinkRecovery : network_failure_events,
ControlChannelBlock : controlplane_failure_events,
ControlChannelUnblock : controlplane_failure_events,
ControllerFailure : controller_failure_events,
ControllerRecovery : controller_failure_events,
BlockControllerPair : controller_failure_events,
UnblockControllerPair : controller_failure_events,
HostMigration : host_events,
# TODO(cs): support TrafficInjection, DataplaneDrop? Might get too noisy.
}
with open(args.input) as input_file:
trace = parse(input_file)
for event in trace:
if type(event) in event2grouping:
event2grouping[type(event)].append(event)
for grouping in [network_failure_events, controlplane_failure_events,
controller_failure_events, host_events]:
grouping.printToConsole()
print
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input', metavar="INPUT",
help='The input json file to be printed')
args = parser.parse_args()
main(args)
|
|
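Editor's note on the TODO inside printToConsole above: the comment itself says the method's side effects (printing directly) compose worse than returning a string. A minimal side-effect-free variant is sketched below; it is self-contained and takes a plain callable instead of the tool's field_formatters, since those would also need to return strings for this refactor to go through — exactly the follow-up the TODO mentions.

class EventGroupingReport(object):
    """Variant of EventGrouping whose render() returns the report as a string."""

    def __init__(self, name, format_event=str):
        # format_event is any callable turning an event into one line of text;
        # str() here is only a stand-in for real per-field formatters.
        self.name = name
        self.format_event = format_event
        self.events = []

    def append(self, event):
        self.events.append(event)

    def render(self):
        title = "====================== %s ======================" % self.name
        lines = [title]
        lines.extend(self.format_event(event) for event in self.events)
        lines.append("=" * len(title))
        return "\n".join(lines)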
95db13e4d0342d44e6a8375779b543f6d631fd8d
|
shinken/modules/dummy_poller.py
|
shinken/modules/dummy_poller.py
|
#!/usr/bin/python
#Copyright (C) 2009 Gabes Jean, naparuba@gmail.com
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#This class is an example of a Poller module
#Here for the configuration phase AND running one
#This text is printed at import time
print "Detected module : Dummy module for Poller"
from shinken.basemodule import BaseModule
properties = {
'type' : 'dummy_poller',
'external' : False,
'phases' : ['worker'],
}
#called by the plugin manager to get a broker
def get_instance(mod_conf):
print "Get a Dummy poller module for plugin %s" % mod_conf.get_name()
instance = Dummy_poller(mod_conf)
return instance
#Just print some stuff
class Dummy_poller(BaseModule):
def __init__(self, mod_conf):
BaseModule.__init__(self, mod_conf)
# Called by poller to say 'let's prepare yourself guy'
def init(self):
print "Initilisation of the dummy poller module"
|
Add : dummy poller module example.
|
Add : dummy poller module example.
|
Python
|
agpl-3.0
|
Simage/shinken,ddurieux/alignak,dfranco/shinken,gst/alignak,Simage/shinken,rednach/krill,geektophe/shinken,Simage/shinken,mohierf/shinken,Simage/shinken,kaji-project/shinken,kaji-project/shinken,tal-nino/shinken,Aimage/shinken,Aimage/shinken,geektophe/shinken,tal-nino/shinken,staute/shinken_deb,peeyush-tm/shinken,KerkhoffTechnologies/shinken,peeyush-tm/shinken,staute/shinken_package,h4wkmoon/shinken,xorpaul/shinken,fpeyre/shinken,naparuba/shinken,xorpaul/shinken,dfranco/shinken,xorpaul/shinken,kaji-project/shinken,fpeyre/shinken,geektophe/shinken,staute/shinken_deb,h4wkmoon/shinken,staute/shinken_deb,naparuba/shinken,rledisez/shinken,peeyush-tm/shinken,savoirfairelinux/shinken,gst/alignak,xorpaul/shinken,dfranco/shinken,rledisez/shinken,rednach/krill,xorpaul/shinken,peeyush-tm/shinken,Alignak-monitoring/alignak,peeyush-tm/shinken,naparuba/shinken,lets-software/shinken,baloo/shinken,kaji-project/shinken,staute/shinken_package,baloo/shinken,claneys/shinken,mohierf/shinken,gst/alignak,Aimage/shinken,naparuba/shinken,fpeyre/shinken,mohierf/shinken,KerkhoffTechnologies/shinken,lets-software/shinken,rledisez/shinken,ddurieux/alignak,staute/shinken_package,KerkhoffTechnologies/shinken,mohierf/shinken,xorpaul/shinken,fpeyre/shinken,staute/shinken_deb,xorpaul/shinken,tal-nino/shinken,claneys/shinken,titilambert/alignak,titilambert/alignak,tal-nino/shinken,claneys/shinken,Alignak-monitoring/alignak,naparuba/shinken,staute/shinken_deb,Aimage/shinken,baloo/shinken,h4wkmoon/shinken,claneys/shinken,rednach/krill,savoirfairelinux/shinken,tal-nino/shinken,ddurieux/alignak,ddurieux/alignak,ddurieux/alignak,geektophe/shinken,peeyush-tm/shinken,fpeyre/shinken,kaji-project/shinken,lets-software/shinken,rednach/krill,baloo/shinken,KerkhoffTechnologies/shinken,baloo/shinken,naparuba/shinken,tal-nino/shinken,savoirfairelinux/shinken,KerkhoffTechnologies/shinken,claneys/shinken,dfranco/shinken,rednach/krill,rednach/krill,lets-software/shinken,dfranco/shinken,Aimage/shinken,mohierf/shinken,gst/alignak,geektophe/shinken,fpeyre/shinken,titilambert/alignak,savoirfairelinux/shinken,Simage/shinken,claneys/shinken,Aimage/shinken,lets-software/shinken,lets-software/shinken,h4wkmoon/shinken,staute/shinken_package,Simage/shinken,savoirfairelinux/shinken,geektophe/shinken,staute/shinken_deb,rledisez/shinken,xorpaul/shinken,h4wkmoon/shinken,staute/shinken_package,kaji-project/shinken,titilambert/alignak,savoirfairelinux/shinken,rledisez/shinken,staute/shinken_package,baloo/shinken,h4wkmoon/shinken,rledisez/shinken,ddurieux/alignak,h4wkmoon/shinken,dfranco/shinken,mohierf/shinken,kaji-project/shinken,h4wkmoon/shinken,KerkhoffTechnologies/shinken
|
Add : dummy poller module example.
|
#!/usr/bin/python
#Copyright (C) 2009 Gabes Jean, naparuba@gmail.com
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#This class is an example of a Poller module
#Here for the configuration phase AND running one
#This text is printed at import time
print "Detected module : Dummy module for Poller"
from shinken.basemodule import BaseModule
properties = {
'type' : 'dummy_poller',
'external' : False,
'phases' : ['worker'],
}
#called by the plugin manager to get a broker
def get_instance(mod_conf):
print "Get a Dummy poller module for plugin %s" % mod_conf.get_name()
instance = Dummy_poller(mod_conf)
return instance
#Just print some stuff
class Dummy_poller(BaseModule):
def __init__(self, mod_conf):
BaseModule.__init__(self, mod_conf)
# Called by poller to say 'let's prepare yourself guy'
def init(self):
print "Initilisation of the dummy poller module"
|
<commit_before><commit_msg>Add : dummy poller module example.<commit_after>
|
#!/usr/bin/python
#Copyright (C) 2009 Gabes Jean, naparuba@gmail.com
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#This class is an example of a Poller module
#Here for the configuration phase AND running one
#This text is printed at import time
print "Detected module : Dummy module for Poller"
from shinken.basemodule import BaseModule
properties = {
'type' : 'dummy_poller',
'external' : False,
'phases' : ['worker'],
}
#called by the plugin manager to get a broker
def get_instance(mod_conf):
print "Get a Dummy poller module for plugin %s" % mod_conf.get_name()
instance = Dummy_poller(mod_conf)
return instance
#Just print some stuff
class Dummy_poller(BaseModule):
def __init__(self, mod_conf):
BaseModule.__init__(self, mod_conf)
# Called by poller to say 'let's prepare yourself guy'
def init(self):
print "Initilisation of the dummy poller module"
|
Add : dummy poller module example.#!/usr/bin/python
#Copyright (C) 2009 Gabes Jean, naparuba@gmail.com
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#This class is an example of a Poller module
#Here for the configuration phase AND running one
#This text is printed at import time
print "Detected module : Dummy module for Poller"
from shinken.basemodule import BaseModule
properties = {
'type' : 'dummy_poller',
'external' : False,
'phases' : ['worker'],
}
#called by the plugin manager to get a broker
def get_instance(mod_conf):
print "Get a Dummy poller module for plugin %s" % mod_conf.get_name()
instance = Dummy_poller(mod_conf)
return instance
#Just print some stuff
class Dummy_poller(BaseModule):
def __init__(self, mod_conf):
BaseModule.__init__(self, mod_conf)
# Called by poller to say 'let's prepare yourself guy'
def init(self):
print "Initilisation of the dummy poller module"
|
<commit_before><commit_msg>Add : dummy poller module example.<commit_after>#!/usr/bin/python
#Copyright (C) 2009 Gabes Jean, naparuba@gmail.com
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#This class is an example of a Poller module
#Here for the configuration phase AND running one
#This text is printed at import time
print "Detected module : Dummy module for Poller"
from shinken.basemodule import BaseModule
properties = {
'type' : 'dummy_poller',
'external' : False,
'phases' : ['worker'],
}
#called by the plugin manager to get a broker
def get_instance(mod_conf):
print "Get a Dummy poller module for plugin %s" % mod_conf.get_name()
instance = Dummy_poller(mod_conf)
return instance
#Just print some stuff
class Dummy_poller(BaseModule):
def __init__(self, mod_conf):
BaseModule.__init__(self, mod_conf)
# Called by poller to say 'let's prepare yourself guy'
def init(self):
print "Initilisation of the dummy poller module"
|
|
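Editor's note on the module-loading contract illustrated above: the plugin manager is expected to read the module-level properties dict, check the declared phases, and then call get_instance(mod_conf) to obtain the worker object before calling init() on it. The few lines below imitate that handshake with a stand-in configuration object; FakeModConf is invented purely for illustration (only get_name() is assumed), and the snippet presumes the dummy poller module above is importable in the same environment.

class FakeModConf(object):
    """Minimal stand-in for the real module configuration object."""

    def get_name(self):
        return "dummy_poller_example"


# What the plugin manager does, in miniature: check the declared phase,
# then ask the module for an instance and initialise it.
if 'worker' in properties['phases']:
    poller_module = get_instance(FakeModConf())
    poller_module.init()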
780c2ed7df2a94de5f5ec89cc4b970cf44c04f06
|
tests/admin_log.py
|
tests/admin_log.py
|
from wolis.test_case import WolisTestCase
class AdminLogTest(WolisTestCase):
def test_admin_log(self):
self.login('morpheus', 'morpheus')
self.acp_login('morpheus', 'morpheus')
start_url = '/adm/index.php'
self.get_with_sid(start_url)
self.assert_successish()
assert 'Board statistics' in self.response.body
url = self.response.urljoin(self.link_href_by_acp_tab_title('Maintenance'))
self.get(url)
self.assert_successish()
# matches nav and heading
assert 'Admin log' in self.response.body
# matches explanation
assert 'This lists all the actions carried out by board administrators.' in self.response.body
# matches a log entry
assert 'Successful administration login' in self.response.body
if __name__ == '__main__':
import unittest
unittest.main()
|
Test for admin log viewing
|
Test for admin log viewing
|
Python
|
bsd-2-clause
|
p/wolis-phpbb,p/wolis-phpbb
|
Test for admin log viewing
|
from wolis.test_case import WolisTestCase
class AdminLogTest(WolisTestCase):
def test_admin_log(self):
self.login('morpheus', 'morpheus')
self.acp_login('morpheus', 'morpheus')
start_url = '/adm/index.php'
self.get_with_sid(start_url)
self.assert_successish()
assert 'Board statistics' in self.response.body
url = self.response.urljoin(self.link_href_by_acp_tab_title('Maintenance'))
self.get(url)
self.assert_successish()
# matches nav and heading
assert 'Admin log' in self.response.body
# matches explanation
assert 'This lists all the actions carried out by board administrators.' in self.response.body
# matches a log entry
assert 'Successful administration login' in self.response.body
if __name__ == '__main__':
import unittest
unittest.main()
|
<commit_before><commit_msg>Test for admin log viewing<commit_after>
|
from wolis.test_case import WolisTestCase
class AdminLogTest(WolisTestCase):
def test_admin_log(self):
self.login('morpheus', 'morpheus')
self.acp_login('morpheus', 'morpheus')
start_url = '/adm/index.php'
self.get_with_sid(start_url)
self.assert_successish()
assert 'Board statistics' in self.response.body
url = self.response.urljoin(self.link_href_by_acp_tab_title('Maintenance'))
self.get(url)
self.assert_successish()
# matches nav and heading
assert 'Admin log' in self.response.body
# matches explanation
assert 'This lists all the actions carried out by board administrators.' in self.response.body
# matches a log entry
assert 'Successful administration login' in self.response.body
if __name__ == '__main__':
import unittest
unittest.main()
|
Test for admin log viewingfrom wolis.test_case import WolisTestCase
class AdminLogTest(WolisTestCase):
def test_admin_log(self):
self.login('morpheus', 'morpheus')
self.acp_login('morpheus', 'morpheus')
start_url = '/adm/index.php'
self.get_with_sid(start_url)
self.assert_successish()
assert 'Board statistics' in self.response.body
url = self.response.urljoin(self.link_href_by_acp_tab_title('Maintenance'))
self.get(url)
self.assert_successish()
# matches nav and heading
assert 'Admin log' in self.response.body
# matches explanation
assert 'This lists all the actions carried out by board administrators.' in self.response.body
# matches a log entry
assert 'Successful administration login' in self.response.body
if __name__ == '__main__':
import unittest
unittest.main()
|
<commit_before><commit_msg>Test for admin log viewing<commit_after>from wolis.test_case import WolisTestCase
class AdminLogTest(WolisTestCase):
def test_admin_log(self):
self.login('morpheus', 'morpheus')
self.acp_login('morpheus', 'morpheus')
start_url = '/adm/index.php'
self.get_with_sid(start_url)
self.assert_successish()
assert 'Board statistics' in self.response.body
url = self.response.urljoin(self.link_href_by_acp_tab_title('Maintenance'))
self.get(url)
self.assert_successish()
# matches nav and heading
assert 'Admin log' in self.response.body
# matches explanation
assert 'This lists all the actions carried out by board administrators.' in self.response.body
# matches a log entry
assert 'Successful administration login' in self.response.body
if __name__ == '__main__':
import unittest
unittest.main()
|
|
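Editor's note on the body-content checks above: the test ends with three bare assert ... in self.response.body statements, which fail without saying which phrase was missing. If tighter failure messages were wanted, a helper along the lines below could be mixed into the test case; assert_body_contains is a name invented here for illustration, not an existing Wolis helper.

class BodyAssertionsMixin(object):
    """Mixin for WolisTestCase subclasses: clearer failures for body-content checks."""

    def assert_body_contains(self, *phrases):
        for phrase in phrases:
            assert phrase in self.response.body, \
                'expected phrase not found in response body: %r' % (phrase,)


# Usage inside a test method:
#   self.assert_body_contains(
#       'Admin log',
#       'This lists all the actions carried out by board administrators.',
#       'Successful administration login',
#   )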
092abd7035168c18c2e49bfbcb5a36b778015a2d
|
traits/tests/test_abc.py
|
traits/tests/test_abc.py
|
""" Test the ABC functionality.
"""
import unittest
try:
import abc
from ..api import ABCHasTraits, ABCMetaHasTraits, HasTraits, Int, Float
class AbstractFoo(ABCHasTraits):
x = Int(10)
y = Float(20.0)
@abc.abstractmethod
def foo(self):
raise NotImplementedError()
@abc.abstractproperty
def bar(self):
raise NotImplementedError()
class ConcreteFoo(AbstractFoo):
def foo(self):
return 'foo'
@property
def bar(self):
return 'bar'
class FooLike(HasTraits):
x = Int(10)
y = Float(20.0)
def foo(self):
return 'foo'
@property
def bar(self):
return 'bar'
AbstractFoo.register(FooLike)
class AbstractBar(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def bar(self):
raise NotImplementedError()
class TestABC(unittest.TestCase):
def test_basic_abc(self):
self.assertRaises(TypeError, AbstractFoo)
concrete = ConcreteFoo()
self.assertEquals(concrete.foo(), 'foo')
self.assertEquals(concrete.bar, 'bar')
self.assertEquals(concrete.x, 10)
self.assertEquals(concrete.y, 20.0)
self.assertTrue(isinstance(concrete, AbstractFoo))
def test_registered(self):
foolike = FooLike()
self.assertTrue(isinstance(foolike, AbstractFoo))
def test_post_hoc_mixing(self):
class TraitedBar(HasTraits, AbstractBar):
__metaclass__ = ABCMetaHasTraits
x = Int(10)
def bar(self):
return 'bar'
traited = TraitedBar()
self.assertTrue(isinstance(traited, AbstractBar))
self.assertEquals(traited.x, 10)
except ImportError:
pass
|
Add some tests for ABCHasTraits.
|
TST: Add some tests for ABCHasTraits.
|
Python
|
bsd-3-clause
|
burnpanck/traits,burnpanck/traits
|
TST: Add some tests for ABCHasTraits.
|
""" Test the ABC functionality.
"""
import unittest
try:
import abc
from ..api import ABCHasTraits, ABCMetaHasTraits, HasTraits, Int, Float
class AbstractFoo(ABCHasTraits):
x = Int(10)
y = Float(20.0)
@abc.abstractmethod
def foo(self):
raise NotImplementedError()
@abc.abstractproperty
def bar(self):
raise NotImplementedError()
class ConcreteFoo(AbstractFoo):
def foo(self):
return 'foo'
@property
def bar(self):
return 'bar'
class FooLike(HasTraits):
x = Int(10)
y = Float(20.0)
def foo(self):
return 'foo'
@property
def bar(self):
return 'bar'
AbstractFoo.register(FooLike)
class AbstractBar(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def bar(self):
raise NotImplementedError()
class TestABC(unittest.TestCase):
def test_basic_abc(self):
self.assertRaises(TypeError, AbstractFoo)
concrete = ConcreteFoo()
self.assertEquals(concrete.foo(), 'foo')
self.assertEquals(concrete.bar, 'bar')
self.assertEquals(concrete.x, 10)
self.assertEquals(concrete.y, 20.0)
self.assertTrue(isinstance(concrete, AbstractFoo))
def test_registered(self):
foolike = FooLike()
self.assertTrue(isinstance(foolike, AbstractFoo))
def test_post_hoc_mixing(self):
class TraitedBar(HasTraits, AbstractBar):
__metaclass__ = ABCMetaHasTraits
x = Int(10)
def bar(self):
return 'bar'
traited = TraitedBar()
self.assertTrue(isinstance(traited, AbstractBar))
self.assertEquals(traited.x, 10)
except ImportError:
pass
|
<commit_before><commit_msg>TST: Add some tests for ABCHasTraits.<commit_after>
|
""" Test the ABC functionality.
"""
import unittest
try:
import abc
from ..api import ABCHasTraits, ABCMetaHasTraits, HasTraits, Int, Float
class AbstractFoo(ABCHasTraits):
x = Int(10)
y = Float(20.0)
@abc.abstractmethod
def foo(self):
raise NotImplementedError()
@abc.abstractproperty
def bar(self):
raise NotImplementedError()
class ConcreteFoo(AbstractFoo):
def foo(self):
return 'foo'
@property
def bar(self):
return 'bar'
class FooLike(HasTraits):
x = Int(10)
y = Float(20.0)
def foo(self):
return 'foo'
@property
def bar(self):
return 'bar'
AbstractFoo.register(FooLike)
class AbstractBar(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def bar(self):
raise NotImplementedError()
class TestABC(unittest.TestCase):
def test_basic_abc(self):
self.assertRaises(TypeError, AbstractFoo)
concrete = ConcreteFoo()
self.assertEquals(concrete.foo(), 'foo')
self.assertEquals(concrete.bar, 'bar')
self.assertEquals(concrete.x, 10)
self.assertEquals(concrete.y, 20.0)
self.assertTrue(isinstance(concrete, AbstractFoo))
def test_registered(self):
foolike = FooLike()
self.assertTrue(isinstance(foolike, AbstractFoo))
def test_post_hoc_mixing(self):
class TraitedBar(HasTraits, AbstractBar):
__metaclass__ = ABCMetaHasTraits
x = Int(10)
def bar(self):
return 'bar'
traited = TraitedBar()
self.assertTrue(isinstance(traited, AbstractBar))
self.assertEquals(traited.x, 10)
except ImportError:
pass
|
TST: Add some tests for ABCHasTraits.""" Test the ABC functionality.
"""
import unittest
try:
import abc
from ..api import ABCHasTraits, ABCMetaHasTraits, HasTraits, Int, Float
class AbstractFoo(ABCHasTraits):
x = Int(10)
y = Float(20.0)
@abc.abstractmethod
def foo(self):
raise NotImplementedError()
@abc.abstractproperty
def bar(self):
raise NotImplementedError()
class ConcreteFoo(AbstractFoo):
def foo(self):
return 'foo'
@property
def bar(self):
return 'bar'
class FooLike(HasTraits):
x = Int(10)
y = Float(20.0)
def foo(self):
return 'foo'
@property
def bar(self):
return 'bar'
AbstractFoo.register(FooLike)
class AbstractBar(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def bar(self):
raise NotImplementedError()
class TestABC(unittest.TestCase):
def test_basic_abc(self):
self.assertRaises(TypeError, AbstractFoo)
concrete = ConcreteFoo()
self.assertEquals(concrete.foo(), 'foo')
self.assertEquals(concrete.bar, 'bar')
self.assertEquals(concrete.x, 10)
self.assertEquals(concrete.y, 20.0)
self.assertTrue(isinstance(concrete, AbstractFoo))
def test_registered(self):
foolike = FooLike()
self.assertTrue(isinstance(foolike, AbstractFoo))
def test_post_hoc_mixing(self):
class TraitedBar(HasTraits, AbstractBar):
__metaclass__ = ABCMetaHasTraits
x = Int(10)
def bar(self):
return 'bar'
traited = TraitedBar()
self.assertTrue(isinstance(traited, AbstractBar))
self.assertEquals(traited.x, 10)
except ImportError:
pass
|
<commit_before><commit_msg>TST: Add some tests for ABCHasTraits.<commit_after>""" Test the ABC functionality.
"""
import unittest
try:
import abc
from ..api import ABCHasTraits, ABCMetaHasTraits, HasTraits, Int, Float
class AbstractFoo(ABCHasTraits):
x = Int(10)
y = Float(20.0)
@abc.abstractmethod
def foo(self):
raise NotImplementedError()
@abc.abstractproperty
def bar(self):
raise NotImplementedError()
class ConcreteFoo(AbstractFoo):
def foo(self):
return 'foo'
@property
def bar(self):
return 'bar'
class FooLike(HasTraits):
x = Int(10)
y = Float(20.0)
def foo(self):
return 'foo'
@property
def bar(self):
return 'bar'
AbstractFoo.register(FooLike)
class AbstractBar(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def bar(self):
raise NotImplementedError()
class TestABC(unittest.TestCase):
def test_basic_abc(self):
self.assertRaises(TypeError, AbstractFoo)
concrete = ConcreteFoo()
self.assertEquals(concrete.foo(), 'foo')
self.assertEquals(concrete.bar, 'bar')
self.assertEquals(concrete.x, 10)
self.assertEquals(concrete.y, 20.0)
self.assertTrue(isinstance(concrete, AbstractFoo))
def test_registered(self):
foolike = FooLike()
self.assertTrue(isinstance(foolike, AbstractFoo))
def test_post_hoc_mixing(self):
class TraitedBar(HasTraits, AbstractBar):
__metaclass__ = ABCMetaHasTraits
x = Int(10)
def bar(self):
return 'bar'
traited = TraitedBar()
self.assertTrue(isinstance(traited, AbstractBar))
self.assertEquals(traited.x, 10)
except ImportError:
pass
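One detail worth flagging for anyone re-running this test today: Python 3 ignores a class-level __metaclass__ attribute, so the post-hoc mix-in above only takes effect under Python 2. A minimal sketch of the Python 3 spelling, assuming the same imports as the test (illustrative only, not part of the recorded commit):

# Python 3 spelling of the post-hoc mix-in; names are assumed importable as above.
class TraitedBar(HasTraits, AbstractBar, metaclass=ABCMetaHasTraits):
    x = Int(10)

    def bar(self):
        return 'bar'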
|
|
3fd9bfaea8b11a12fd7bdece9c1877c1a5a6afa6
|
tests/test_text.py
|
tests/test_text.py
|
# -*- coding: utf-8 -*-
from vdm.text import normalize, tokenize, url_slug
def test_normalize():
#Mixed case and str to unicode
assert normalize('BroWn') == normalize(u'Brown')
#Trailing spaces
assert normalize(' Brown ') == normalize('Brown')
#removed accents
assert normalize(u'Èasy') == normalize('Easy')
#new lines
assert normalize('Brown\nUniv') == normalize('brown univ')
def test_tokenize():
tokens = [t for t in tokenize("Brown Univ.")]
assert tokens == ['Brown', 'Univ']
tokens = [t for t in tokenize("Brown Univ.02912")]
assert '02912' in tokens
def test_url_slug():
assert url_slug('Brown Univ') == 'brown-univ'
assert url_slug('Brown univ') == 'brown-univ'
assert url_slug('Brown.Univ') == 'brown_univ'
|
Add basic tests for text.
|
Add basic tests for text.
|
Python
|
mit
|
Brown-University-Library/vivo-data-management,Brown-University-Library/vivo-data-management
|
Add basic tests for text.
|
# -*- coding: utf-8 -*-
from vdm.text import normalize, tokenize, url_slug
def test_normalize():
#Mixed case and str to unicode
assert normalize('BroWn') == normalize(u'Brown')
#Trailing spaces
assert normalize(' Brown ') == normalize('Brown')
#removed accents
assert normalize(u'Èasy') == normalize('Easy')
#new lines
assert normalize('Brown\nUniv') == normalize('brown univ')
def test_tokenize():
tokens = [t for t in tokenize("Brown Univ.")]
assert tokens == ['Brown', 'Univ']
tokens = [t for t in tokenize("Brown Univ.02912")]
assert '02912' in tokens
def test_url_slug():
assert url_slug('Brown Univ') == 'brown-univ'
assert url_slug('Brown univ') == 'brown-univ'
assert url_slug('Brown.Univ') == 'brown_univ'
|
<commit_before><commit_msg>Add basic tests for text.<commit_after>
|
# -*- coding: utf-8 -*-
from vdm.text import normalize, tokenize, url_slug
def test_normalize():
#Mixed case and str to unicode
assert normalize('BroWn') == normalize(u'Brown')
#Trailing spaces
assert normalize(' Brown ') == normalize('Brown')
#removed accents
assert normalize(u'Èasy') == normalize('Easy')
#new lines
assert normalize('Brown\nUniv') == normalize('brown univ')
def test_tokenize():
tokens = [t for t in tokenize("Brown Univ.")]
assert tokens == ['Brown', 'Univ']
tokens = [t for t in tokenize("Brown Univ.02912")]
assert '02912' in tokens
def test_url_slug():
assert url_slug('Brown Univ') == 'brown-univ'
assert url_slug('Brown univ') == 'brown-univ'
assert url_slug('Brown.Univ') == 'brown_univ'
|
Add basic tests for text.# -*- coding: utf-8 -*-
from vdm.text import normalize, tokenize, url_slug
def test_normalize():
#Mixed case and str to unicode
assert normalize('BroWn') == normalize(u'Brown')
#Trailing spaces
assert normalize(' Brown ') == normalize('Brown')
#removed accents
assert normalize(u'Èasy') == normalize('Easy')
#new lines
assert normalize('Brown\nUniv') == normalize('brown univ')
def test_tokenize():
tokens = [t for t in tokenize("Brown Univ.")]
assert tokens == ['Brown', 'Univ']
tokens = [t for t in tokenize("Brown Univ.02912")]
assert '02912' in tokens
def test_url_slug():
assert url_slug('Brown Univ') == 'brown-univ'
assert url_slug('Brown univ') == 'brown-univ'
assert url_slug('Brown.Univ') == 'brown_univ'
|
<commit_before><commit_msg>Add basic tests for text.<commit_after># -*- coding: utf-8 -*-
from vdm.text import normalize, tokenize, url_slug
def test_normalize():
#Mixed case and str to unicode
assert normalize('BroWn') == normalize(u'Brown')
#Trailing spaces
assert normalize(' Brown ') == normalize('Brown')
#removed accents
assert normalize(u'Èasy') == normalize('Easy')
#new lines
assert normalize('Brown\nUniv') == normalize('brown univ')
def test_tokenize():
tokens = [t for t in tokenize("Brown Univ.")]
assert tokens == ['Brown', 'Univ']
tokens = [t for t in tokenize("Brown Univ.02912")]
assert '02912' in tokens
def test_url_slug():
assert url_slug('Brown Univ') == 'brown-univ'
assert url_slug('Brown univ') == 'brown-univ'
assert url_slug('Brown.Univ') == 'brown_univ'
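For readers without the vdm.text source at hand, the three assertions above pin down url_slug's observable behaviour: lowercase the input, turn spaces into hyphens, and turn dots into underscores. One plausible stand-in consistent with those assertions (a sketch for illustration, not the library's actual code):

# Illustrative stand-in for url_slug, matching only the three assertions above.
def url_slug_sketch(value):
    slug = value.strip().lower()
    slug = slug.replace('.', '_')   # 'Brown.Univ' -> 'brown_univ'
    slug = slug.replace(' ', '-')   # 'Brown Univ' -> 'brown-univ'
    return slug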
|
|
69d3cb59c19410d4ac6b619c06fd9d1a9bba9154
|
changes/api/project_coverage_index.py
|
changes/api/project_coverage_index.py
|
from __future__ import absolute_import, division, unicode_literals
from flask.ext.restful import reqparse
from changes.api.base import APIView
from changes.constants import Result, Status
from changes.models import Build, Job, FileCoverage, Project, Source
SORT_CHOICES = (
'lines_covered',
'lines_uncovered',
'name',
)
class ProjectCoverageIndexAPIView(APIView):
parser = reqparse.RequestParser()
parser.add_argument('query', type=unicode, location='args')
parser.add_argument('sort', type=unicode, location='args',
choices=SORT_CHOICES, default='name')
def get(self, project_id):
project = Project.get(project_id)
if not project:
return '', 404
args = self.parser.parse_args()
latest_build = Build.query.join(
Source, Source.id == Build.source_id,
).filter(
Source.patch_id == None, # NOQA
Build.project_id == project.id,
Build.result == Result.passed,
Build.status == Status.finished,
).order_by(
Build.date_created.desc(),
).limit(1).first()
if not latest_build:
return self.respond([])
# use the most recent coverage
cover_list = FileCoverage.query.filter(
FileCoverage.job_id.in_(
Job.query.filter(Job.build_id == latest_build.id)
)
)
if args.query:
cover_list = cover_list.filter(
FileCoverage.filename.startswith(args.query),
)
if args.sort == 'lines_covered':
sort_by = FileCoverage.lines_covered.desc()
        elif args.sort == 'lines_uncovered':
sort_by = FileCoverage.lines_uncovered.desc()
elif args.sort == 'name':
sort_by = FileCoverage.name.asc()
cover_list = cover_list.order_by(sort_by)
return self.paginate(cover_list)
|
Add project coverage index endpoint
|
Add project coverage index endpoint
|
Python
|
apache-2.0
|
wfxiang08/changes,bowlofstew/changes,wfxiang08/changes,bowlofstew/changes,dropbox/changes,dropbox/changes,dropbox/changes,wfxiang08/changes,bowlofstew/changes,bowlofstew/changes,wfxiang08/changes,dropbox/changes
|
Add project coverage index endpoint
|
from __future__ import absolute_import, division, unicode_literals
from flask.ext.restful import reqparse
from changes.api.base import APIView
from changes.constants import Result, Status
from changes.models import Build, Job, FileCoverage, Project, Source
SORT_CHOICES = (
'lines_covered',
'lines_uncovered',
'name',
)
class ProjectCoverageIndexAPIView(APIView):
parser = reqparse.RequestParser()
parser.add_argument('query', type=unicode, location='args')
parser.add_argument('sort', type=unicode, location='args',
choices=SORT_CHOICES, default='name')
def get(self, project_id):
project = Project.get(project_id)
if not project:
return '', 404
args = self.parser.parse_args()
latest_build = Build.query.join(
Source, Source.id == Build.source_id,
).filter(
Source.patch_id == None, # NOQA
Build.project_id == project.id,
Build.result == Result.passed,
Build.status == Status.finished,
).order_by(
Build.date_created.desc(),
).limit(1).first()
if not latest_build:
return self.respond([])
# use the most recent coverage
cover_list = FileCoverage.query.filter(
FileCoverage.job_id.in_(
Job.query.filter(Job.build_id == latest_build.id)
)
)
if args.query:
cover_list = cover_list.filter(
FileCoverage.filename.startswith(args.query),
)
if args.sort == 'lines_covered':
sort_by = FileCoverage.lines_covered.desc()
        elif args.sort == 'lines_uncovered':
sort_by = FileCoverage.lines_uncovered.desc()
elif args.sort == 'name':
sort_by = FileCoverage.name.asc()
cover_list = cover_list.order_by(sort_by)
return self.paginate(cover_list)
|
<commit_before><commit_msg>Add project coverage index endpoint<commit_after>
|
from __future__ import absolute_import, division, unicode_literals
from flask.ext.restful import reqparse
from changes.api.base import APIView
from changes.constants import Result, Status
from changes.models import Build, Job, FileCoverage, Project, Source
SORT_CHOICES = (
'lines_covered',
'lines_uncovered',
'name',
)
class ProjectCoverageIndexAPIView(APIView):
parser = reqparse.RequestParser()
parser.add_argument('query', type=unicode, location='args')
parser.add_argument('sort', type=unicode, location='args',
choices=SORT_CHOICES, default='name')
def get(self, project_id):
project = Project.get(project_id)
if not project:
return '', 404
args = self.parser.parse_args()
latest_build = Build.query.join(
Source, Source.id == Build.source_id,
).filter(
Source.patch_id == None, # NOQA
Build.project_id == project.id,
Build.result == Result.passed,
Build.status == Status.finished,
).order_by(
Build.date_created.desc(),
).limit(1).first()
if not latest_build:
return self.respond([])
# use the most recent coverage
cover_list = FileCoverage.query.filter(
FileCoverage.job_id.in_(
Job.query.filter(Job.build_id == latest_build.id)
)
)
if args.query:
cover_list = cover_list.filter(
FileCoverage.filename.startswith(args.query),
)
if args.sort == 'lines_covered':
sort_by = FileCoverage.lines_covered.desc()
        elif args.sort == 'lines_uncovered':
sort_by = FileCoverage.lines_uncovered.desc()
elif args.sort == 'name':
sort_by = FileCoverage.name.asc()
cover_list = cover_list.order_by(sort_by)
return self.paginate(cover_list)
|
Add project coverage index endpointfrom __future__ import absolute_import, division, unicode_literals
from flask.ext.restful import reqparse
from changes.api.base import APIView
from changes.constants import Result, Status
from changes.models import Build, Job, FileCoverage, Project, Source
SORT_CHOICES = (
'lines_covered',
'lines_uncovered',
'name',
)
class ProjectCoverageIndexAPIView(APIView):
parser = reqparse.RequestParser()
parser.add_argument('query', type=unicode, location='args')
parser.add_argument('sort', type=unicode, location='args',
choices=SORT_CHOICES, default='name')
def get(self, project_id):
project = Project.get(project_id)
if not project:
return '', 404
args = self.parser.parse_args()
latest_build = Build.query.join(
Source, Source.id == Build.source_id,
).filter(
Source.patch_id == None, # NOQA
Build.project_id == project.id,
Build.result == Result.passed,
Build.status == Status.finished,
).order_by(
Build.date_created.desc(),
).limit(1).first()
if not latest_build:
return self.respond([])
# use the most recent coverage
cover_list = FileCoverage.query.filter(
FileCoverage.job_id.in_(
Job.query.filter(Job.build_id == latest_build.id)
)
)
if args.query:
cover_list = cover_list.filter(
FileCoverage.filename.startswith(args.query),
)
if args.sort == 'lines_covered':
sort_by = FileCoverage.lines_covered.desc()
        elif args.sort == 'lines_uncovered':
sort_by = FileCoverage.lines_uncovered.desc()
elif args.sort == 'name':
sort_by = FileCoverage.name.asc()
cover_list = cover_list.order_by(sort_by)
return self.paginate(cover_list)
|
<commit_before><commit_msg>Add project coverage index endpoint<commit_after>from __future__ import absolute_import, division, unicode_literals
from flask.ext.restful import reqparse
from changes.api.base import APIView
from changes.constants import Result, Status
from changes.models import Build, Job, FileCoverage, Project, Source
SORT_CHOICES = (
'lines_covered',
'lines_uncovered',
'name',
)
class ProjectCoverageIndexAPIView(APIView):
parser = reqparse.RequestParser()
parser.add_argument('query', type=unicode, location='args')
parser.add_argument('sort', type=unicode, location='args',
choices=SORT_CHOICES, default='name')
def get(self, project_id):
project = Project.get(project_id)
if not project:
return '', 404
args = self.parser.parse_args()
latest_build = Build.query.join(
Source, Source.id == Build.source_id,
).filter(
Source.patch_id == None, # NOQA
Build.project_id == project.id,
Build.result == Result.passed,
Build.status == Status.finished,
).order_by(
Build.date_created.desc(),
).limit(1).first()
if not latest_build:
return self.respond([])
# use the most recent coverage
cover_list = FileCoverage.query.filter(
FileCoverage.job_id.in_(
Job.query.filter(Job.build_id == latest_build.id)
)
)
if args.query:
cover_list = cover_list.filter(
FileCoverage.filename.startswith(args.query),
)
if args.sort == 'lines_covered':
sort_by = FileCoverage.lines_covered.desc()
        elif args.sort == 'lines_uncovered':
sort_by = FileCoverage.lines_uncovered.desc()
elif args.sort == 'name':
sort_by = FileCoverage.name.asc()
cover_list = cover_list.order_by(sort_by)
return self.paginate(cover_list)
|
|
4a41fb3a3b6a689e3cc3c83d711331ad743824de
|
py/longest-univalue-path.py
|
py/longest-univalue-path.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def dfs(self, cur):
""" return longest univalue path, longest univalue path to children from self"""
if cur:
llup, llups = self.dfs(cur.left)
rlup, rlups = self.dfs(cur.right)
lup = 1
lups = 1
if cur.left and cur.val == cur.left.val:
lup += llups
lups = max(lups, 1 + llups)
if cur.right and cur.val == cur.right.val:
lup += rlups
lups = max(lups, 1 + rlups)
return max(llup, rlup, lup), lups
else:
return 0, 0
def longestUnivaluePath(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return max(self.dfs(root)[0] - 1, 0)
|
Add py solution for 687. Longest Univalue Path
|
Add py solution for 687. Longest Univalue Path
687. Longest Univalue Path: https://leetcode.com/problems/longest-univalue-path/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 687. Longest Univalue Path
687. Longest Univalue Path: https://leetcode.com/problems/longest-univalue-path/
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def dfs(self, cur):
""" return longest univalue path, longest univalue path to children from self"""
if cur:
llup, llups = self.dfs(cur.left)
rlup, rlups = self.dfs(cur.right)
lup = 1
lups = 1
if cur.left and cur.val == cur.left.val:
lup += llups
lups = max(lups, 1 + llups)
if cur.right and cur.val == cur.right.val:
lup += rlups
lups = max(lups, 1 + rlups)
return max(llup, rlup, lup), lups
else:
return 0, 0
def longestUnivaluePath(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return max(self.dfs(root)[0] - 1, 0)
|
<commit_before><commit_msg>Add py solution for 687. Longest Univalue Path
687. Longest Univalue Path: https://leetcode.com/problems/longest-univalue-path/<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def dfs(self, cur):
""" return longest univalue path, longest univalue path to children from self"""
if cur:
llup, llups = self.dfs(cur.left)
rlup, rlups = self.dfs(cur.right)
lup = 1
lups = 1
if cur.left and cur.val == cur.left.val:
lup += llups
lups = max(lups, 1 + llups)
if cur.right and cur.val == cur.right.val:
lup += rlups
lups = max(lups, 1 + rlups)
return max(llup, rlup, lup), lups
else:
return 0, 0
def longestUnivaluePath(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return max(self.dfs(root)[0] - 1, 0)
|
Add py solution for 687. Longest Univalue Path
687. Longest Univalue Path: https://leetcode.com/problems/longest-univalue-path/# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def dfs(self, cur):
""" return longest univalue path, longest univalue path to children from self"""
if cur:
llup, llups = self.dfs(cur.left)
rlup, rlups = self.dfs(cur.right)
lup = 1
lups = 1
if cur.left and cur.val == cur.left.val:
lup += llups
lups = max(lups, 1 + llups)
if cur.right and cur.val == cur.right.val:
lup += rlups
lups = max(lups, 1 + rlups)
return max(llup, rlup, lup), lups
else:
return 0, 0
def longestUnivaluePath(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return max(self.dfs(root)[0] - 1, 0)
|
<commit_before><commit_msg>Add py solution for 687. Longest Univalue Path
687. Longest Univalue Path: https://leetcode.com/problems/longest-univalue-path/<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def dfs(self, cur):
""" return longest univalue path, longest univalue path to children from self"""
if cur:
llup, llups = self.dfs(cur.left)
rlup, rlups = self.dfs(cur.right)
lup = 1
lups = 1
if cur.left and cur.val == cur.left.val:
lup += llups
lups = max(lups, 1 + llups)
if cur.right and cur.val == cur.right.val:
lup += rlups
lups = max(lups, 1 + rlups)
return max(llup, rlup, lup), lups
else:
return 0, 0
def longestUnivaluePath(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return max(self.dfs(root)[0] - 1, 0)
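A quick, illustrative self-check of the solution above (not part of the submitted file; TreeNode is defined locally to mirror the commented stub). The answer counts edges, so the small tree below should give 2:

# Small hand-built tree: 5 -> (4 -> 1, 1) on the left, 5 -> 5 on the right.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(5)
root.left, root.right = TreeNode(4), TreeNode(5)
root.left.left, root.left.right = TreeNode(1), TreeNode(1)
root.right.right = TreeNode(5)
assert Solution().longestUnivaluePath(root) == 2  # the 5-5-5 chain has two edges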
|
|
5b531877f30a323de3171422916c29f20921a18a
|
pylearn2/costs/tests/test_lp_norm_cost.py
|
pylearn2/costs/tests/test_lp_norm_cost.py
|
"""
Test LpNorm cost
"""
import numpy
import theano
from theano import tensor as T
from nose.tools import raises
def test_shared_variables():
'''
LpNorm should handle shared variables.
'''
assert False
def test_symbolic_expressions_of_shared_variables():
'''
LpNorm should handle symbolic expressions of shared variables.
'''
assert False
@raises(Exception)
def test_symbolic_variables():
'''
LpNorm should not handle symbolic variables
'''
assert True
if __name__ == '__main__':
test_shared_variables()
test_symbolic_expressions_of_shared_variables()
test_symbolic_variables()
|
Add unit test for LpNorm
|
Add unit test for LpNorm
|
Python
|
bsd-3-clause
|
woozzu/pylearn2,Refefer/pylearn2,ddboline/pylearn2,nouiz/pylearn2,w1kke/pylearn2,pkainz/pylearn2,fulmicoton/pylearn2,mkraemer67/pylearn2,pkainz/pylearn2,se4u/pylearn2,cosmoharrigan/pylearn2,skearnes/pylearn2,msingh172/pylearn2,JesseLivezey/pylearn2,pombredanne/pylearn2,ddboline/pylearn2,fulmicoton/pylearn2,pombredanne/pylearn2,nouiz/pylearn2,pombredanne/pylearn2,lunyang/pylearn2,matrogers/pylearn2,Refefer/pylearn2,w1kke/pylearn2,TNick/pylearn2,JesseLivezey/plankton,fishcorn/pylearn2,JesseLivezey/plankton,alexjc/pylearn2,msingh172/pylearn2,CIFASIS/pylearn2,woozzu/pylearn2,msingh172/pylearn2,mclaughlin6464/pylearn2,woozzu/pylearn2,bartvm/pylearn2,lamblin/pylearn2,junbochen/pylearn2,alexjc/pylearn2,JesseLivezey/plankton,CIFASIS/pylearn2,ashhher3/pylearn2,jeremyfix/pylearn2,jamessergeant/pylearn2,theoryno3/pylearn2,aalmah/pylearn2,KennethPierce/pylearnk,shiquanwang/pylearn2,ddboline/pylearn2,chrish42/pylearn,ashhher3/pylearn2,hyqneuron/pylearn2-maxsom,goodfeli/pylearn2,abergeron/pylearn2,JesseLivezey/pylearn2,matrogers/pylearn2,hantek/pylearn2,daemonmaker/pylearn2,goodfeli/pylearn2,pkainz/pylearn2,lunyang/pylearn2,w1kke/pylearn2,theoryno3/pylearn2,fishcorn/pylearn2,nouiz/pylearn2,w1kke/pylearn2,hantek/pylearn2,lamblin/pylearn2,sandeepkbhat/pylearn2,fyffyt/pylearn2,pkainz/pylearn2,shiquanwang/pylearn2,Refefer/pylearn2,kastnerkyle/pylearn2,junbochen/pylearn2,lisa-lab/pylearn2,bartvm/pylearn2,junbochen/pylearn2,theoryno3/pylearn2,aalmah/pylearn2,lancezlin/pylearn2,fishcorn/pylearn2,woozzu/pylearn2,jamessergeant/pylearn2,kastnerkyle/pylearn2,kose-y/pylearn2,jeremyfix/pylearn2,JesseLivezey/plankton,goodfeli/pylearn2,abergeron/pylearn2,abergeron/pylearn2,lunyang/pylearn2,ashhher3/pylearn2,KennethPierce/pylearnk,theoryno3/pylearn2,CIFASIS/pylearn2,hyqneuron/pylearn2-maxsom,ashhher3/pylearn2,TNick/pylearn2,chrish42/pylearn,jeremyfix/pylearn2,sandeepkbhat/pylearn2,lancezlin/pylearn2,lancezlin/pylearn2,KennethPierce/pylearnk,chrish42/pylearn,kose-y/pylearn2,daemonmaker/pylearn2,se4u/pylearn2,TNick/pylearn2,hyqneuron/pylearn2-maxsom,skearnes/pylearn2,alexjc/pylearn2,aalmah/pylearn2,caidongyun/pylearn2,caidongyun/pylearn2,se4u/pylearn2,fishcorn/pylearn2,sandeepkbhat/pylearn2,kastnerkyle/pylearn2,JesseLivezey/pylearn2,JesseLivezey/pylearn2,daemonmaker/pylearn2,lancezlin/pylearn2,mclaughlin6464/pylearn2,mkraemer67/pylearn2,jamessergeant/pylearn2,lamblin/pylearn2,pombredanne/pylearn2,bartvm/pylearn2,skearnes/pylearn2,jamessergeant/pylearn2,fyffyt/pylearn2,hyqneuron/pylearn2-maxsom,sandeepkbhat/pylearn2,fyffyt/pylearn2,mkraemer67/pylearn2,lamblin/pylearn2,fulmicoton/pylearn2,cosmoharrigan/pylearn2,cosmoharrigan/pylearn2,ddboline/pylearn2,matrogers/pylearn2,mclaughlin6464/pylearn2,junbochen/pylearn2,aalmah/pylearn2,caidongyun/pylearn2,shiquanwang/pylearn2,CIFASIS/pylearn2,skearnes/pylearn2,shiquanwang/pylearn2,daemonmaker/pylearn2,abergeron/pylearn2,kose-y/pylearn2,hantek/pylearn2,jeremyfix/pylearn2,cosmoharrigan/pylearn2,KennethPierce/pylearnk,mclaughlin6464/pylearn2,matrogers/pylearn2,caidongyun/pylearn2,chrish42/pylearn,nouiz/pylearn2,fulmicoton/pylearn2,lisa-lab/pylearn2,bartvm/pylearn2,alexjc/pylearn2,fyffyt/pylearn2,lunyang/pylearn2,se4u/pylearn2,Refefer/pylearn2,lisa-lab/pylearn2,TNick/pylearn2,mkraemer67/pylearn2,lisa-lab/pylearn2,msingh172/pylearn2,kastnerkyle/pylearn2,hantek/pylearn2,goodfeli/pylearn2,kose-y/pylearn2
|
Add unit test for LpNorm
|
"""
Test LpNorm cost
"""
import numpy
import theano
from theano import tensor as T
from nose.tools import raises
def test_shared_variables():
'''
LpNorm should handle shared variables.
'''
assert False
def test_symbolic_expressions_of_shared_variables():
'''
LpNorm should handle symbolic expressions of shared variables.
'''
assert False
@raises(Exception)
def test_symbolic_variables():
'''
LpNorm should not handle symbolic variables
'''
assert True
if __name__ == '__main__':
test_shared_variables()
test_symbolic_expressions_of_shared_variables()
test_symbolic_variables()
|
<commit_before><commit_msg>Add unit test for LpNorm<commit_after>
|
"""
Test LpNorm cost
"""
import numpy
import theano
from theano import tensor as T
from nose.tools import raises
def test_shared_variables():
'''
LpNorm should handle shared variables.
'''
assert False
def test_symbolic_expressions_of_shared_variables():
'''
LpNorm should handle symbolic expressions of shared variables.
'''
assert False
@raises(Exception)
def test_symbolic_variables():
'''
LpNorm should not handle symbolic variables
'''
assert True
if __name__ == '__main__':
test_shared_variables()
test_symbolic_expressions_of_shared_variables()
test_symbolic_variables()
|
Add unit test for LpNorm"""
Test LpNorm cost
"""
import numpy
import theano
from theano import tensor as T
from nose.tools import raises
def test_shared_variables():
'''
LpNorm should handle shared variables.
'''
assert False
def test_symbolic_expressions_of_shared_variables():
'''
LpNorm should handle symbolic expressions of shared variables.
'''
assert False
@raises(Exception)
def test_symbolic_variables():
'''
LpNorm should not handle symbolic variables
'''
assert True
if __name__ == '__main__':
test_shared_variables()
test_symbolic_expressions_of_shared_variables()
test_symbolic_variables()
|
<commit_before><commit_msg>Add unit test for LpNorm<commit_after>"""
Test LpNorm cost
"""
import numpy
import theano
from theano import tensor as T
from nose.tools import raises
def test_shared_variables():
'''
LpNorm should handle shared variables.
'''
assert False
def test_symbolic_expressions_of_shared_variables():
'''
LpNorm should handle symbolic expressions of shared variables.
'''
assert False
@raises(Exception)
def test_symbolic_variables():
'''
LpNorm should not handle symbolic variables
'''
assert True
if __name__ == '__main__':
test_shared_variables()
test_symbolic_expressions_of_shared_variables()
test_symbolic_variables()
|
|
1206fe0a6f57bc925dc01db03c604b7b786384aa
|
misc/migrate_miro_vhs.py
|
misc/migrate_miro_vhs.py
|
#!/usr/bin/env python
# -*- encoding: utf-8
import boto3
def get_existing_records(dynamodb_client):
"""
Generates existing Miro records from the SourceData table.
"""
paginator = dynamodb_client.get_paginator('scan')
for page in paginator.paginate(TableName='SourceData'):
for item in page['Items']:
yield item
if __name__ == '__main__':
dynamodb_client = boto3.client('dynamodb')
for item in get_existing_records(dynamodb_client):
print(item)
break
|
Add the initial draft of the migration script
|
Add the initial draft of the migration script
|
Python
|
mit
|
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
|
Add the initial draft of the migration script
|
#!/usr/bin/env python
# -*- encoding: utf-8
import boto3
def get_existing_records(dynamodb_client):
"""
Generates existing Miro records from the SourceData table.
"""
paginator = dynamodb_client.get_paginator('scan')
for page in paginator.paginate(TableName='SourceData'):
for item in page['Items']:
yield item
if __name__ == '__main__':
dynamodb_client = boto3.client('dynamodb')
for item in get_existing_records(dynamodb_client):
print(item)
break
|
<commit_before><commit_msg>Add the initial draft of the migration script<commit_after>
|
#!/usr/bin/env python
# -*- encoding: utf-8
import boto3
def get_existing_records(dynamodb_client):
"""
Generates existing Miro records from the SourceData table.
"""
paginator = dynamodb_client.get_paginator('scan')
for page in paginator.paginate(TableName='SourceData'):
for item in page['Items']:
yield item
if __name__ == '__main__':
dynamodb_client = boto3.client('dynamodb')
for item in get_existing_records(dynamodb_client):
print(item)
break
|
Add the initial draft of the migration script#!/usr/bin/env python
# -*- encoding: utf-8
import boto3
def get_existing_records(dynamodb_client):
"""
Generates existing Miro records from the SourceData table.
"""
paginator = dynamodb_client.get_paginator('scan')
for page in paginator.paginate(TableName='SourceData'):
for item in page['Items']:
yield item
if __name__ == '__main__':
dynamodb_client = boto3.client('dynamodb')
for item in get_existing_records(dynamodb_client):
print(item)
break
|
<commit_before><commit_msg>Add the initial draft of the migration script<commit_after>#!/usr/bin/env python
# -*- encoding: utf-8
import boto3
def get_existing_records(dynamodb_client):
"""
Generates existing Miro records from the SourceData table.
"""
paginator = dynamodb_client.get_paginator('scan')
for page in paginator.paginate(TableName='SourceData'):
for item in page['Items']:
yield item
if __name__ == '__main__':
dynamodb_client = boto3.client('dynamodb')
for item in get_existing_records(dynamodb_client):
print(item)
break
|
|
a3cad465c43cc88ac382e0bf44166d49420ae02e
|
dash/plotly_graph.py
|
dash/plotly_graph.py
|
import datetime
import time
import random
from dash.react import Dash
from dash_html_components import Div
from dash_core_components import Dropdown, PlotlyGraph
dash = Dash(__name__)
graph_json = {
'figure': {
'layout': {
'barmode': 'stack'
}
}
}
graph_data = [
[{
'x': [
20,
14,
23
],
'y': [
'giraffes',
'orangutans',
'monkeys'
],
'marker': {
'color': 'rgba(55, 128, 191, 0.6)',
'line': {
'color': 'rgba(55, 128, 191, 1.0)',
'width': 1
}
},
'name': 'SF Zoo',
'orientation': 'h',
'type': 'bar'
}],
[{
'x': [
12,
18,
29
],
'y': [
'giraffes',
'orangutans',
'monkeys'
],
'marker': {
'color': 'rgba(255, 153, 51, 0.6)',
'line': {
'color': 'rgba(255, 153, 51, 1.0)',
'width': 1
}
},
'name': 'LA Zoo',
'orientation': 'h',
'type': 'bar'
}]
]
dash.layout = Div(id='wrapper', content=[
Dropdown(id='data_source', options=[
{'value': '0', 'label': 'Data set 0'},
{'value': '1', 'label': 'Data set 1'}
]),
PlotlyGraph(id='graph', figure=graph_json)
])
def update_graph(dropdown):
dropdown_value = int(dropdown['props']['value'])
selected_data = graph_data[dropdown_value]
return {
'figure': {
'data': selected_data
}
}
dash.react('graph', ['data_source'])(update_graph)
if __name__ == '__main__':
dash.server.run(port=8050, debug=True)
|
Add plotly graph example with animals
|
Add plotly graph example with animals
|
Python
|
mit
|
plotly/dash,plotly/dash,plotly/dash,plotly/dash,plotly/dash
|
Add plotly graph example with animals
|
import datetime
import time
import random
from dash.react import Dash
from dash_html_components import Div
from dash_core_components import Dropdown, PlotlyGraph
dash = Dash(__name__)
graph_json = {
'figure': {
'layout': {
'barmode': 'stack'
}
}
}
graph_data = [
[{
'x': [
20,
14,
23
],
'y': [
'giraffes',
'orangutans',
'monkeys'
],
'marker': {
'color': 'rgba(55, 128, 191, 0.6)',
'line': {
'color': 'rgba(55, 128, 191, 1.0)',
'width': 1
}
},
'name': 'SF Zoo',
'orientation': 'h',
'type': 'bar'
}],
[{
'x': [
12,
18,
29
],
'y': [
'giraffes',
'orangutans',
'monkeys'
],
'marker': {
'color': 'rgba(255, 153, 51, 0.6)',
'line': {
'color': 'rgba(255, 153, 51, 1.0)',
'width': 1
}
},
'name': 'LA Zoo',
'orientation': 'h',
'type': 'bar'
}]
]
dash.layout = Div(id='wrapper', content=[
Dropdown(id='data_source', options=[
{'value': '0', 'label': 'Data set 0'},
{'value': '1', 'label': 'Data set 1'}
]),
PlotlyGraph(id='graph', figure=graph_json)
])
def update_graph(dropdown):
dropdown_value = int(dropdown['props']['value'])
selected_data = graph_data[dropdown_value]
return {
'figure': {
'data': selected_data
}
}
dash.react('graph', ['data_source'])(update_graph)
if __name__ == '__main__':
dash.server.run(port=8050, debug=True)
|
<commit_before><commit_msg>Add plotly graph example with animals<commit_after>
|
import datetime
import time
import random
from dash.react import Dash
from dash_html_components import Div
from dash_core_components import Dropdown, PlotlyGraph
dash = Dash(__name__)
graph_json = {
'figure': {
'layout': {
'barmode': 'stack'
}
}
}
graph_data = [
[{
'x': [
20,
14,
23
],
'y': [
'giraffes',
'orangutans',
'monkeys'
],
'marker': {
'color': 'rgba(55, 128, 191, 0.6)',
'line': {
'color': 'rgba(55, 128, 191, 1.0)',
'width': 1
}
},
'name': 'SF Zoo',
'orientation': 'h',
'type': 'bar'
}],
[{
'x': [
12,
18,
29
],
'y': [
'giraffes',
'orangutans',
'monkeys'
],
'marker': {
'color': 'rgba(255, 153, 51, 0.6)',
'line': {
'color': 'rgba(255, 153, 51, 1.0)',
'width': 1
}
},
'name': 'LA Zoo',
'orientation': 'h',
'type': 'bar'
}]
]
dash.layout = Div(id='wrapper', content=[
Dropdown(id='data_source', options=[
{'value': '0', 'label': 'Data set 0'},
{'value': '1', 'label': 'Data set 1'}
]),
PlotlyGraph(id='graph', figure=graph_json)
])
def update_graph(dropdown):
dropdown_value = int(dropdown['props']['value'])
selected_data = graph_data[dropdown_value]
return {
'figure': {
'data': selected_data
}
}
dash.react('graph', ['data_source'])(update_graph)
if __name__ == '__main__':
dash.server.run(port=8050, debug=True)
|
Add plotly graph example with animalsimport datetime
import time
import random
from dash.react import Dash
from dash_html_components import Div
from dash_core_components import Dropdown, PlotlyGraph
dash = Dash(__name__)
graph_json = {
'figure': {
'layout': {
'barmode': 'stack'
}
}
}
graph_data = [
[{
'x': [
20,
14,
23
],
'y': [
'giraffes',
'orangutans',
'monkeys'
],
'marker': {
'color': 'rgba(55, 128, 191, 0.6)',
'line': {
'color': 'rgba(55, 128, 191, 1.0)',
'width': 1
}
},
'name': 'SF Zoo',
'orientation': 'h',
'type': 'bar'
}],
[{
'x': [
12,
18,
29
],
'y': [
'giraffes',
'orangutans',
'monkeys'
],
'marker': {
'color': 'rgba(255, 153, 51, 0.6)',
'line': {
'color': 'rgba(255, 153, 51, 1.0)',
'width': 1
}
},
'name': 'LA Zoo',
'orientation': 'h',
'type': 'bar'
}]
]
dash.layout = Div(id='wrapper', content=[
Dropdown(id='data_source', options=[
{'value': '0', 'label': 'Data set 0'},
{'value': '1', 'label': 'Data set 1'}
]),
PlotlyGraph(id='graph', figure=graph_json)
])
def update_graph(dropdown):
dropdown_value = int(dropdown['props']['value'])
selected_data = graph_data[dropdown_value]
return {
'figure': {
'data': selected_data
}
}
dash.react('graph', ['data_source'])(update_graph)
if __name__ == '__main__':
dash.server.run(port=8050, debug=True)
|
<commit_before><commit_msg>Add plotly graph example with animals<commit_after>import datetime
import time
import random
from dash.react import Dash
from dash_html_components import Div
from dash_core_components import Dropdown, PlotlyGraph
dash = Dash(__name__)
graph_json = {
'figure': {
'layout': {
'barmode': 'stack'
}
}
}
graph_data = [
[{
'x': [
20,
14,
23
],
'y': [
'giraffes',
'orangutans',
'monkeys'
],
'marker': {
'color': 'rgba(55, 128, 191, 0.6)',
'line': {
'color': 'rgba(55, 128, 191, 1.0)',
'width': 1
}
},
'name': 'SF Zoo',
'orientation': 'h',
'type': 'bar'
}],
[{
'x': [
12,
18,
29
],
'y': [
'giraffes',
'orangutans',
'monkeys'
],
'marker': {
'color': 'rgba(255, 153, 51, 0.6)',
'line': {
'color': 'rgba(255, 153, 51, 1.0)',
'width': 1
}
},
'name': 'LA Zoo',
'orientation': 'h',
'type': 'bar'
}]
]
dash.layout = Div(id='wrapper', content=[
Dropdown(id='data_source', options=[
{'value': '0', 'label': 'Data set 0'},
{'value': '1', 'label': 'Data set 1'}
]),
PlotlyGraph(id='graph', figure=graph_json)
])
def update_graph(dropdown):
dropdown_value = int(dropdown['props']['value'])
selected_data = graph_data[dropdown_value]
return {
'figure': {
'data': selected_data
}
}
dash.react('graph', ['data_source'])(update_graph)
if __name__ == '__main__':
dash.server.run(port=8050, debug=True)
|
|
32301da235d40ae3eca7dcbdd7c8438d3c35f775
|
app/task.py
|
app/task.py
|
from mongoengine import Document, DateTimeField, EmailField, IntField, \
ReferenceField, StringField
import datetime, enum
class Priority(enum.IntEnum):
LOW = 0,
MIDDLE = 1,
HIGH = 2
"""
This defines the basic model for a Task as we want it to be stored in the
MongoDB.
"""
class Task(Document):
title = StringField(max_length=150, required=True)
description = StringField(max_length=800, required=True)
creator = EmailField(max_length=120, required=True)
assigne = EmailField(max_length=120, required=True)
created_at = DateTimeField(default=datetime.datetime.now, required=True)
closed_at = DateTimeField(required=False)
status = IntField(default=0, required=True)
priority = IntField(default=Priority.LOW, required=True)
|
Add a basic model for a Task
|
Add a basic model for a Task
|
Python
|
mit
|
Zillolo/lazy-todo
|
Add a basic model for a Task
|
from mongoengine import Document, DateTimeField, EmailField, IntField, \
ReferenceField, StringField
import datetime, enum
class Priority(enum.IntEnum):
LOW = 0,
MIDDLE = 1,
HIGH = 2
"""
This defines the basic model for a Task as we want it to be stored in the
MongoDB.
"""
class Task(Document):
title = StringField(max_length=150, required=True)
description = StringField(max_length=800, required=True)
creator = EmailField(max_length=120, required=True)
assigne = EmailField(max_length=120, required=True)
created_at = DateTimeField(default=datetime.datetime.now, required=True)
closed_at = DateTimeField(required=False)
status = IntField(default=0, required=True)
priority = IntField(default=Priority.LOW, required=True)
|
<commit_before><commit_msg>Add a basic model for a Task<commit_after>
|
from mongoengine import Document, DateTimeField, EmailField, IntField, \
ReferenceField, StringField
import datetime, enum
class Priority(enum.IntEnum):
LOW = 0,
MIDDLE = 1,
HIGH = 2
"""
This defines the basic model for a Task as we want it to be stored in the
MongoDB.
"""
class Task(Document):
title = StringField(max_length=150, required=True)
description = StringField(max_length=800, required=True)
creator = EmailField(max_length=120, required=True)
assigne = EmailField(max_length=120, required=True)
created_at = DateTimeField(default=datetime.datetime.now, required=True)
closed_at = DateTimeField(required=False)
status = IntField(default=0, required=True)
priority = IntField(default=Priority.LOW, required=True)
|
Add a basic model for a Taskfrom mongoengine import Document, DateTimeField, EmailField, IntField, \
ReferenceField, StringField
import datetime, enum
class Priority(enum.IntEnum):
LOW = 0,
MIDDLE = 1,
HIGH = 2
"""
This defines the basic model for a Task as we want it to be stored in the
MongoDB.
"""
class Task(Document):
title = StringField(max_length=150, required=True)
description = StringField(max_length=800, required=True)
creator = EmailField(max_length=120, required=True)
assigne = EmailField(max_length=120, required=True)
created_at = DateTimeField(default=datetime.datetime.now, required=True)
closed_at = DateTimeField(required=False)
status = IntField(default=0, required=True)
priority = IntField(default=Priority.LOW, required=True)
|
<commit_before><commit_msg>Add a basic model for a Task<commit_after>from mongoengine import Document, DateTimeField, EmailField, IntField, \
ReferenceField, StringField
import datetime, enum
class Priority(enum.IntEnum):
LOW = 0,
MIDDLE = 1,
HIGH = 2
"""
This defines the basic model for a Task as we want it to be stored in the
MongoDB.
"""
class Task(Document):
title = StringField(max_length=150, required=True)
description = StringField(max_length=800, required=True)
creator = EmailField(max_length=120, required=True)
assigne = EmailField(max_length=120, required=True)
created_at = DateTimeField(default=datetime.datetime.now, required=True)
closed_at = DateTimeField(required=False)
status = IntField(default=0, required=True)
priority = IntField(default=Priority.LOW, required=True)
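A short usage sketch for the model above (illustrative only; the database name lazy_todo and the status code are assumptions, not taken from the project):

# Illustrative only: create, save and later close a Task.
import datetime
from mongoengine import connect

connect('lazy_todo')  # assumed database name
task = Task(title='Write docs',
            description='Document the Task model',
            creator='alice@example.com',
            assigne='bob@example.com',       # field name as defined on the model
            priority=int(Priority.HIGH))
task.save()
task.status = 1                              # meaning of status codes is not defined by the model
task.closed_at = datetime.datetime.now()
task.save()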
|
|
29a4c1e0d905b92e4c35d594183e73be31e5c89e
|
tesla/models.py
|
tesla/models.py
|
"""
Tesla car model
"""
import uuid
import json
from random import choice
__OFF__ = 'off'
__ON__ = 'on'
__COLOR__ = ['black', 'white', 'red', 'brown', 'gold', 'pink']
JSONEncoder_old = json.JSONEncoder.default
def JSONEncoder_new(self, obj):
if isinstance(obj, uuid.UUID): return str(obj)
return JSONEncoder_old(self, obj)
json.JSONEncoder.default = JSONEncoder_new
class TeslaVehicle(object):
def __init__(self, name, user_id, color, vehicle_id=None, state=None):
self.color = color
self.name = name
self.state = state or __OFF__
self.user_id = user_id
self.lights = __OFF__
self.horn = __OFF__
self.vehicle_id = vehicle_id or uuid.uuid4()
def __repr__(self):
return self.status
@property
def status(self):
return json.dumps(self.__dict__)
def hork_horn(self):
# TODO: Play a sound
return self.status
def switch_lights(self, action):
# if action not in [__OFF__, __ON__]: exception
self.lights = action
return self.status
class Garage(object):
_VEHICLES = {}
_INSTANCE = None
def create_vehicle(self, name, user_id):
vehicle = TeslaVehicle(name, user_id, choice(__COLOR__))
self._VEHICLES[str(vehicle.vehicle_id)] = vehicle
return vehicle
garage = Garage()
|
Add a basic vehicle model and garage
|
Add a basic vehicle model and garage
|
Python
|
mit
|
headforwards/tesla-simulator-server,headforwards/tesla-simulator-server
|
Add a basic vehicle model and garage
|
"""
Tesla car model
"""
import uuid
import json
from random import choice
__OFF__ = 'off'
__ON__ = 'on'
__COLOR__ = ['black', 'white', 'red', 'brown', 'gold', 'pink']
JSONEncoder_old = json.JSONEncoder.default
def JSONEncoder_new(self, obj):
if isinstance(obj, uuid.UUID): return str(obj)
return JSONEncoder_old(self, obj)
json.JSONEncoder.default = JSONEncoder_new
class TeslaVehicle(object):
def __init__(self, name, user_id, color, vehicle_id=None, state=None):
self.color = color
self.name = name
self.state = state or __OFF__
self.user_id = user_id
self.lights = __OFF__
self.horn = __OFF__
self.vehicle_id = vehicle_id or uuid.uuid4()
def __repr__(self):
return self.status
@property
def status(self):
return json.dumps(self.__dict__)
def hork_horn(self):
# TODO: Play a sound
return self.status
def switch_lights(self, action):
# if action not in [__OFF__, __ON__]: exception
self.lights = action
return self.status
class Garage(object):
_VEHICLES = {}
_INSTANCE = None
def create_vehicle(self, name, user_id):
vehicle = TeslaVehicle(name, user_id, choice(__COLOR__))
self._VEHICLES[str(vehicle.vehicle_id)] = vehicle
return vehicle
garage = Garage()
|
<commit_before><commit_msg>Add a basic vehicle model and garage<commit_after>
|
"""
Tesla car model
"""
import uuid
import json
from random import choice
__OFF__ = 'off'
__ON__ = 'on'
__COLOR__ = ['black', 'white', 'red', 'brown', 'gold', 'pink']
JSONEncoder_old = json.JSONEncoder.default
def JSONEncoder_new(self, obj):
if isinstance(obj, uuid.UUID): return str(obj)
return JSONEncoder_old(self, obj)
json.JSONEncoder.default = JSONEncoder_new
class TeslaVehicle(object):
def __init__(self, name, user_id, color, vehicle_id=None, state=None):
self.color = color
self.name = name
self.state = state or __OFF__
self.user_id = user_id
self.lights = __OFF__
self.horn = __OFF__
self.vehicle_id = vehicle_id or uuid.uuid4()
def __repr__(self):
return self.status
@property
def status(self):
return json.dumps(self.__dict__)
def hork_horn(self):
# TODO: Play a sound
return self.status
def switch_lights(self, action):
# if action not in [__OFF__, __ON__]: exception
self.lights = action
return self.status
class Garage(object):
_VEHICLES = {}
_INSTANCE = None
def create_vehicle(self, name, user_id):
vehicle = TeslaVehicle(name, user_id, choice(__COLOR__))
self._VEHICLES[str(vehicle.vehicle_id)] = vehicle
return vehicle
garage = Garage()
|
Add a basic vehicle model and garage"""
Tesla car model
"""
import uuid
import json
from random import choice
__OFF__ = 'off'
__ON__ = 'on'
__COLOR__ = ['black', 'white', 'red', 'brown', 'gold', 'pink']
JSONEncoder_old = json.JSONEncoder.default
def JSONEncoder_new(self, obj):
if isinstance(obj, uuid.UUID): return str(obj)
return JSONEncoder_old(self, obj)
json.JSONEncoder.default = JSONEncoder_new
class TeslaVehicle(object):
def __init__(self, name, user_id, color, vehicle_id=None, state=None):
self.color = color
self.name = name
self.state = state or __OFF__
self.user_id = user_id
self.lights = __OFF__
self.horn = __OFF__
self.vehicle_id = vehicle_id or uuid.uuid4()
def __repr__(self):
return self.status
@property
def status(self):
return json.dumps(self.__dict__)
def hork_horn(self):
# TODO: Play a sound
return self.status
def switch_lights(self, action):
# if action not in [__OFF__, __ON__]: exception
self.lights = action
return self.status
class Garage(object):
_VEHICLES = {}
_INSTANCE = None
def create_vehicle(self, name, user_id):
vehicle = TeslaVehicle(name, user_id, choice(__COLOR__))
self._VEHICLES[str(vehicle.vehicle_id)] = vehicle
return vehicle
garage = Garage()
|
<commit_before><commit_msg>Add a basic vehicle model and garage<commit_after>"""
Tesla car model
"""
import uuid
import json
from random import choice
__OFF__ = 'off'
__ON__ = 'on'
__COLOR__ = ['black', 'white', 'red', 'brown', 'gold', 'pink']
JSONEncoder_old = json.JSONEncoder.default
def JSONEncoder_new(self, obj):
if isinstance(obj, uuid.UUID): return str(obj)
return JSONEncoder_old(self, obj)
json.JSONEncoder.default = JSONEncoder_new
class TeslaVehicle(object):
def __init__(self, name, user_id, color, vehicle_id=None, state=None):
self.color = color
self.name = name
self.state = state or __OFF__
self.user_id = user_id
self.lights = __OFF__
self.horn = __OFF__
self.vehicle_id = vehicle_id or uuid.uuid4()
def __repr__(self):
return self.status
@property
def status(self):
return json.dumps(self.__dict__)
def hork_horn(self):
# TODO: Play a sound
return self.status
def switch_lights(self, action):
# if action not in [__OFF__, __ON__]: exception
self.lights = action
return self.status
class Garage(object):
_VEHICLES = {}
_INSTANCE = None
def create_vehicle(self, name, user_id):
vehicle = TeslaVehicle(name, user_id, choice(__COLOR__))
self._VEHICLES[str(vehicle.vehicle_id)] = vehicle
return vehicle
garage = Garage()
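A brief, illustrative exercise of the garage and vehicle API defined above (the vehicle name and user id are arbitrary placeholders):

# Illustrative only: create a vehicle and toggle its lights via the module-level garage.
car = garage.create_vehicle('Model S', 'user-123')
print(car.status)                 # JSON string; state, lights and horn all start 'off'
print(car.switch_lights('on'))    # lights flip to 'on' in the returned status
print(garage._VEHICLES[str(car.vehicle_id)] is car)  # True: stored under its own id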
|
|
0ce38e6dd892083e74113852424d1825f5dbf8bd
|
tools/integrate-redis-stats.py
|
tools/integrate-redis-stats.py
|
#!usr/bin/env python
import os
import sys
import datetime
import redis
from itertools import izip, izip_longest
# Workaround current bug in docutils:
# http://permalink.gmane.org/gmane.text.docutils.devel/6324
import docutils.utils
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path = [root] + sys.path
import store
import config
redis = redis.Redis()
# Get our search for the previous hour keys
current = datetime.datetime.utcnow()
lasthour = current - datetime.timedelta(hours=1)
search = "downloads:hour:%s:*:*" % lasthour.strftime("%y-%m-%d-%H")
# Make sure we haven't integrated this already
if redis.sismember("downloads:integrated", search):
print("Already Integrated '%s'" % search)
sys.exit(0)
# Fetch all of the keys
keys = redis.keys(search)
if not keys:
print("No keys match '%s'" % search)
sys.exit(0)
# Fetch all of the download counts (in batches of 200)
counts = []
for batch in izip_longest(*[iter(keys)] * 200):
batch = [x for x in batch if x is not None]
counts.extend(redis.mget(*batch))
# Combine the keys with the counts
downloads = izip(
(int(y) for y in counts),
(x.split(":")[-1] for x in keys),
)
# Update the database
c = config.Config("/data/pypi/config.ini")
store = store.Store(c)
cursor = store.get_cursor()
cursor.executemany(
"UPDATE release_files SET downloads = downloads + %s WHERE filename = %s",
downloads,
)
cursor.commit()
cursor.close()
# Add this to our integrated set
redis.sadd("downloads:integrated", search)
|
Add a script that processes stats from Redis
|
Add a script that processes stats from Redis
|
Python
|
bsd-3-clause
|
pydotorg/pypi,pydotorg/pypi,pydotorg/pypi,pydotorg/pypi
|
Add a script that processes stats from Redis
|
#!usr/bin/env python
import os
import sys
import datetime
import redis
from itertools import izip, izip_longest
# Workaround current bug in docutils:
# http://permalink.gmane.org/gmane.text.docutils.devel/6324
import docutils.utils
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path = [root] + sys.path
import store
import config
redis = redis.Redis()
# Get our search for the previous hour keys
current = datetime.datetime.utcnow()
lasthour = current - datetime.timedelta(hours=1)
search = "downloads:hour:%s:*:*" % lasthour.strftime("%y-%m-%d-%H")
# Make sure we haven't integrated this already
if redis.sismember("downloads:integrated", search):
print("Already Integrated '%s'" % search)
sys.exit(0)
# Fetch all of the keys
keys = redis.keys(search)
if not keys:
print("No keys match '%s'" % search)
sys.exit(0)
# Fetch all of the download counts (in batches of 200)
counts = []
for batch in izip_longest(*[iter(keys)] * 200):
batch = [x for x in batch if x is not None]
counts.extend(redis.mget(*batch))
# Combine the keys with the counts
downloads = izip(
(int(y) for y in counts),
(x.split(":")[-1] for x in keys),
)
# Update the database
c = config.Config("/data/pypi/config.ini")
store = store.Store(c)
cursor = store.get_cursor()
cursor.executemany(
"UPDATE release_files SET downloads = downloads + %s WHERE filename = %s",
downloads,
)
cursor.commit()
cursor.close()
# Add this to our integrated set
redis.sadd("downloads:integrated", search)
|
<commit_before><commit_msg>Add a script that processes stats from Redis<commit_after>
|
#!usr/bin/env python
import os
import sys
import datetime
import redis
from itertools import izip, izip_longest
# Workaround current bug in docutils:
# http://permalink.gmane.org/gmane.text.docutils.devel/6324
import docutils.utils
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path = [root] + sys.path
import store
import config
redis = redis.Redis()
# Get our search for the previous hour keys
current = datetime.datetime.utcnow()
lasthour = current - datetime.timedelta(hours=1)
search = "downloads:hour:%s:*:*" % lasthour.strftime("%y-%m-%d-%H")
# Make sure we haven't integrated this already
if redis.sismember("downloads:integrated", search):
print("Already Integrated '%s'" % search)
sys.exit(0)
# Fetch all of the keys
keys = redis.keys(search)
if not keys:
print("No keys match '%s'" % search)
sys.exit(0)
# Fetch all of the download counts (in batches of 200)
counts = []
for batch in izip_longest(*[iter(keys)] * 200):
batch = [x for x in batch if x is not None]
counts.extend(redis.mget(*batch))
# Combine the keys with the counts
downloads = izip(
(int(y) for y in counts),
(x.split(":")[-1] for x in keys),
)
# Update the database
c = config.Config("/data/pypi/config.ini")
store = store.Store(c)
cursor = store.get_cursor()
cursor.executemany(
"UPDATE release_files SET downloads = downloads + %s WHERE filename = %s",
downloads,
)
cursor.commit()
cursor.close()
# Add this to our integrated set
redis.sadd("downloads:integrated", search)
|
Add a script that processes stats from Redis#!usr/bin/env python
import os
import sys
import datetime
import redis
from itertools import izip, izip_longest
# Workaround current bug in docutils:
# http://permalink.gmane.org/gmane.text.docutils.devel/6324
import docutils.utils
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path = [root] + sys.path
import store
import config
redis = redis.Redis()
# Get our search for the previous hour keys
current = datetime.datetime.utcnow()
lasthour = current - datetime.timedelta(hours=1)
search = "downloads:hour:%s:*:*" % lasthour.strftime("%y-%m-%d-%H")
# Make sure we haven't integrated this already
if redis.sismember("downloads:integrated", search):
print("Already Integrated '%s'" % search)
sys.exit(0)
# Fetch all of the keys
keys = redis.keys(search)
if not keys:
print("No keys match '%s'" % search)
sys.exit(0)
# Fetch all of the download counts (in batches of 200)
counts = []
for batch in izip_longest(*[iter(keys)] * 200):
batch = [x for x in batch if x is not None]
counts.extend(redis.mget(*batch))
# Combine the keys with the counts
downloads = izip(
(int(y) for y in counts),
(x.split(":")[-1] for x in keys),
)
# Update the database
c = config.Config("/data/pypi/config.ini")
store = store.Store(c)
cursor = store.get_cursor()
cursor.executemany(
"UPDATE release_files SET downloads = downloads + %s WHERE filename = %s",
downloads,
)
cursor.commit()
cursor.close()
# Add this to our integrated set
redis.sadd("downloads:integrated", search)
|
<commit_before><commit_msg>Add a script that processes stats from Redis<commit_after>#!usr/bin/env python
import os
import sys
import datetime
import redis
from itertools import izip, izip_longest
# Workaround current bug in docutils:
# http://permalink.gmane.org/gmane.text.docutils.devel/6324
import docutils.utils
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path = [root] + sys.path
import store
import config
redis = redis.Redis()
# Get our search for the previous hour keys
current = datetime.datetime.utcnow()
lasthour = current - datetime.timedelta(hours=1)
search = "downloads:hour:%s:*:*" % lasthour.strftime("%y-%m-%d-%H")
# Make sure we haven't integrated this already
if redis.sismember("downloads:integrated", search):
print("Already Integrated '%s'" % search)
sys.exit(0)
# Fetch all of the keys
keys = redis.keys(search)
if not keys:
print("No keys match '%s'" % search)
sys.exit(0)
# Fetch all of the download counts (in batches of 200)
counts = []
for batch in izip_longest(*[iter(keys)] * 200):
batch = [x for x in batch if x is not None]
counts.extend(redis.mget(*batch))
# Combine the keys with the counts
downloads = izip(
(int(y) for y in counts),
(x.split(":")[-1] for x in keys),
)
# Update the database
c = config.Config("/data/pypi/config.ini")
store = store.Store(c)
cursor = store.get_cursor()
cursor.executemany(
"UPDATE release_files SET downloads = downloads + %s WHERE filename = %s",
downloads,
)
cursor.commit()
cursor.close()
# Add this to our integrated set
redis.sadd("downloads:integrated", search)
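
The script expects per-hour counter keys named downloads:hour:<yy-mm-dd-HH>:<segment>:<filename>, with the filename as the last colon-separated field. A minimal sketch of how a download-serving process might produce such counters; the middle key segment, the helper name, and the example arguments are assumptions for illustration, not part of the commit:

import datetime
import redis

def count_download(conn, segment, filename, when=None):
    # Hypothetical producer side: one INCR per file per hour. The integration
    # script above later sums these counters into release_files.downloads.
    when = when or datetime.datetime.utcnow()
    key = "downloads:hour:%s:%s:%s" % (when.strftime("%y-%m-%d-%H"), segment, filename)
    conn.incr(key)

count_download(redis.Redis(), "mirror-a", "example-1.0.tar.gz")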
|
|
f1932b77306e2c1138ec99b312f2db80d0078079
|
misc/python/nisttest.py
|
misc/python/nisttest.py
|
#!/usr/bin/python
import sys, re, os, botan
from os.path import join;
class TestResult(Exception):
def __init__(self, r):
self.result = r
def __str__(self):
return repr(self.result).replace('botan._botan.verify_result.', '')
def throw_unless_ok(r):
if r != botan.verify_result.verified:
raise TestResult(r)
def validate(ca_certs, certs, crls, ee_certs):
store = botan.X509_Store()
for cert in certs:
if cert not in ee_certs:
store.add_cert(botan.X509_Certificate(cert), cert in ca_certs)
for crl in crls:
throw_unless_ok(store.add_crl(botan.X509_CRL(crl)))
for ee in ee_certs:
throw_unless_ok(store.validate(botan.X509_Certificate(ee)))
raise TestResult(botan.verify_result.verified)
def main():
for root, dirs, files in os.walk('../nist_tests/tests'):
if files:
crls = [join(root,x) for x in files if x.endswith(".crl")]
certs = [join(root,x) for x in files if x.endswith(".crt")]
end_entity = [x for x in certs if x.find("End Cert") != -1]
ca_certs = [x for x in certs if x.find("Trust Anchor") != -1]
try:
validate(ca_certs, certs, crls, end_entity)
except TestResult, result:
print result
if __name__ == "__main__":
sys.exit(main())
|
Add an implementation of the NIST X.509 tests in Python
|
Add an implementation of the NIST X.509 tests in Python
|
Python
|
bsd-2-clause
|
randombit/botan,webmaster128/botan,webmaster128/botan,webmaster128/botan,Rohde-Schwarz-Cybersecurity/botan,randombit/botan,Rohde-Schwarz-Cybersecurity/botan,Rohde-Schwarz-Cybersecurity/botan,webmaster128/botan,randombit/botan,Rohde-Schwarz-Cybersecurity/botan,webmaster128/botan,randombit/botan,randombit/botan,Rohde-Schwarz-Cybersecurity/botan,Rohde-Schwarz-Cybersecurity/botan
|
Add an implementation of the NIST X.509 tests in Python
|
#!/usr/bin/python
import sys, re, os, botan
from os.path import join;
class TestResult(Exception):
def __init__(self, r):
self.result = r
def __str__(self):
return repr(self.result).replace('botan._botan.verify_result.', '')
def throw_unless_ok(r):
if r != botan.verify_result.verified:
raise TestResult(r)
def validate(ca_certs, certs, crls, ee_certs):
store = botan.X509_Store()
for cert in certs:
if cert not in ee_certs:
store.add_cert(botan.X509_Certificate(cert), cert in ca_certs)
for crl in crls:
throw_unless_ok(store.add_crl(botan.X509_CRL(crl)))
for ee in ee_certs:
throw_unless_ok(store.validate(botan.X509_Certificate(ee)))
raise TestResult(botan.verify_result.verified)
def main():
for root, dirs, files in os.walk('../nist_tests/tests'):
if files:
crls = [join(root,x) for x in files if x.endswith(".crl")]
certs = [join(root,x) for x in files if x.endswith(".crt")]
end_entity = [x for x in certs if x.find("End Cert") != -1]
ca_certs = [x for x in certs if x.find("Trust Anchor") != -1]
try:
validate(ca_certs, certs, crls, end_entity)
except TestResult, result:
print result
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add an implementation of the NIST X.509 tests in Python<commit_after>
|
#!/usr/bin/python
import sys, re, os, botan
from os.path import join;
class TestResult(Exception):
def __init__(self, r):
self.result = r
def __str__(self):
return repr(self.result).replace('botan._botan.verify_result.', '')
def throw_unless_ok(r):
if r != botan.verify_result.verified:
raise TestResult(r)
def validate(ca_certs, certs, crls, ee_certs):
store = botan.X509_Store()
for cert in certs:
if cert not in ee_certs:
store.add_cert(botan.X509_Certificate(cert), cert in ca_certs)
for crl in crls:
throw_unless_ok(store.add_crl(botan.X509_CRL(crl)))
for ee in ee_certs:
throw_unless_ok(store.validate(botan.X509_Certificate(ee)))
raise TestResult(botan.verify_result.verified)
def main():
for root, dirs, files in os.walk('../nist_tests/tests'):
if files:
crls = [join(root,x) for x in files if x.endswith(".crl")]
certs = [join(root,x) for x in files if x.endswith(".crt")]
end_entity = [x for x in certs if x.find("End Cert") != -1]
ca_certs = [x for x in certs if x.find("Trust Anchor") != -1]
try:
validate(ca_certs, certs, crls, end_entity)
except TestResult, result:
print result
if __name__ == "__main__":
sys.exit(main())
|
Add an implementation of the NIST X.509 tests in Python#!/usr/bin/python
import sys, re, os, botan
from os.path import join;
class TestResult(Exception):
def __init__(self, r):
self.result = r
def __str__(self):
return repr(self.result).replace('botan._botan.verify_result.', '')
def throw_unless_ok(r):
if r != botan.verify_result.verified:
raise TestResult(r)
def validate(ca_certs, certs, crls, ee_certs):
store = botan.X509_Store()
for cert in certs:
if cert not in ee_certs:
store.add_cert(botan.X509_Certificate(cert), cert in ca_certs)
for crl in crls:
throw_unless_ok(store.add_crl(botan.X509_CRL(crl)))
for ee in ee_certs:
throw_unless_ok(store.validate(botan.X509_Certificate(ee)))
raise TestResult(botan.verify_result.verified)
def main():
for root, dirs, files in os.walk('../nist_tests/tests'):
if files:
crls = [join(root,x) for x in files if x.endswith(".crl")]
certs = [join(root,x) for x in files if x.endswith(".crt")]
end_entity = [x for x in certs if x.find("End Cert") != -1]
ca_certs = [x for x in certs if x.find("Trust Anchor") != -1]
try:
validate(ca_certs, certs, crls, end_entity)
except TestResult, result:
print result
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add an implementation of the NIST X.509 tests in Python<commit_after>#!/usr/bin/python
import sys, re, os, botan
from os.path import join;
class TestResult(Exception):
def __init__(self, r):
self.result = r
def __str__(self):
return repr(self.result).replace('botan._botan.verify_result.', '')
def throw_unless_ok(r):
if r != botan.verify_result.verified:
raise TestResult(r)
def validate(ca_certs, certs, crls, ee_certs):
store = botan.X509_Store()
for cert in certs:
if cert not in ee_certs:
store.add_cert(botan.X509_Certificate(cert), cert in ca_certs)
for crl in crls:
throw_unless_ok(store.add_crl(botan.X509_CRL(crl)))
for ee in ee_certs:
throw_unless_ok(store.validate(botan.X509_Certificate(ee)))
raise TestResult(botan.verify_result.verified)
def main():
for root, dirs, files in os.walk('../nist_tests/tests'):
if files:
crls = [join(root,x) for x in files if x.endswith(".crl")]
certs = [join(root,x) for x in files if x.endswith(".crt")]
end_entity = [x for x in certs if x.find("End Cert") != -1]
ca_certs = [x for x in certs if x.find("Trust Anchor") != -1]
try:
validate(ca_certs, certs, crls, end_entity)
except TestResult, result:
print result
if __name__ == "__main__":
sys.exit(main())
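
The walker prints one verify_result per test directory. A small illustrative variation that tallies verified and failed directories instead of printing; it assumes it lives in the same module as the validate/TestResult helpers above and is not part of the commit:

def run_and_tally(top='../nist_tests/tests'):
    # Count verified vs. failed test directories instead of printing each result.
    passed, failed = 0, 0
    for root, dirs, files in os.walk(top):
        if not files:
            continue
        crls = [join(root, x) for x in files if x.endswith(".crl")]
        certs = [join(root, x) for x in files if x.endswith(".crt")]
        end_entity = [x for x in certs if x.find("End Cert") != -1]
        ca_certs = [x for x in certs if x.find("Trust Anchor") != -1]
        try:
            validate(ca_certs, certs, crls, end_entity)
        except TestResult, result:
            if result.result == botan.verify_result.verified:
                passed += 1
            else:
                failed += 1
    return passed, failed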
|
|
b3657c48b3958f57a1d71e296570566d7924d7ca
|
recipes/omas/run_test.py
|
recipes/omas/run_test.py
|
import omas, os
os.environ['USER'] = 'TEST_CONDA_USER'
omas.test_omas_suite()
|
import omas, os
if 'USER' not in os.environ:
os.environ['USER'] = 'TEST_CONDA_USER'
if 'HOME' not in os.environ:
os.environ['HOME'] = '/tmp'
omas.test_omas_suite()
|
Make test more robust to missing USER and HOME
|
omas: Make test more robust to missing USER and HOME
|
Python
|
bsd-3-clause
|
Cashalow/staged-recipes,kwilcox/staged-recipes,patricksnape/staged-recipes,guillochon/staged-recipes,birdsarah/staged-recipes,basnijholt/staged-recipes,mariusvniekerk/staged-recipes,mcs07/staged-recipes,Juanlu001/staged-recipes,petrushy/staged-recipes,ocefpaf/staged-recipes,ceholden/staged-recipes,Juanlu001/staged-recipes,jjhelmus/staged-recipes,basnijholt/staged-recipes,chrisburr/staged-recipes,jochym/staged-recipes,johanneskoester/staged-recipes,ReimarBauer/staged-recipes,mariusvniekerk/staged-recipes,kwilcox/staged-recipes,shadowwalkersb/staged-recipes,jjhelmus/staged-recipes,pmlandwehr/staged-recipes,jochym/staged-recipes,synapticarbors/staged-recipes,hadim/staged-recipes,scopatz/staged-recipes,barkls/staged-recipes,dschreij/staged-recipes,conda-forge/staged-recipes,asmeurer/staged-recipes,jakirkham/staged-recipes,pmlandwehr/staged-recipes,cpaulik/staged-recipes,goanpeca/staged-recipes,cpaulik/staged-recipes,Cashalow/staged-recipes,ocefpaf/staged-recipes,rvalieris/staged-recipes,guillochon/staged-recipes,dschreij/staged-recipes,johanneskoester/staged-recipes,NOAA-ORR-ERD/staged-recipes,barkls/staged-recipes,hadim/staged-recipes,ReimarBauer/staged-recipes,mcs07/staged-recipes,SylvainCorlay/staged-recipes,synapticarbors/staged-recipes,igortg/staged-recipes,sannykr/staged-recipes,glemaitre/staged-recipes,scopatz/staged-recipes,sodre/staged-recipes,rvalieris/staged-recipes,chrisburr/staged-recipes,rmcgibbo/staged-recipes,petrushy/staged-recipes,SylvainCorlay/staged-recipes,sodre/staged-recipes,conda-forge/staged-recipes,patricksnape/staged-recipes,isuruf/staged-recipes,goanpeca/staged-recipes,NOAA-ORR-ERD/staged-recipes,ceholden/staged-recipes,sodre/staged-recipes,stuertz/staged-recipes,isuruf/staged-recipes,rmcgibbo/staged-recipes,asmeurer/staged-recipes,birdsarah/staged-recipes,jakirkham/staged-recipes,sannykr/staged-recipes,stuertz/staged-recipes,igortg/staged-recipes,shadowwalkersb/staged-recipes,glemaitre/staged-recipes
|
import omas, os
os.environ['USER'] = 'TEST_CONDA_USER'
omas.test_omas_suite()
omas: Make test more robust to missing USER and HOME
|
import omas, os
if 'USER' not in os.environ:
os.environ['USER'] = 'TEST_CONDA_USER'
if 'HOME' not in os.environ:
os.environ['HOME'] = '/tmp'
omas.test_omas_suite()
|
<commit_before>import omas, os
os.environ['USER'] = 'TEST_CONDA_USER'
omas.test_omas_suite()
<commit_msg>omas: Make test more robust to missing USER and HOME<commit_after>
|
import omas, os
if 'USER' not in os.environ:
os.environ['USER'] = 'TEST_CONDA_USER'
if 'HOME' not in os.environ:
os.environ['HOME'] = '/tmp'
omas.test_omas_suite()
|
import omas, os
os.environ['USER'] = 'TEST_CONDA_USER'
omas.test_omas_suite()
omas: Make test more robust to missing USER and HOMEimport omas, os
if 'USER' not in os.environ:
os.environ['USER'] = 'TEST_CONDA_USER'
if 'HOME' not in os.environ:
os.environ['HOME'] = '/tmp'
omas.test_omas_suite()
|
<commit_before>import omas, os
os.environ['USER'] = 'TEST_CONDA_USER'
omas.test_omas_suite()
<commit_msg>omas: Make test more robust to missing USER and HOME<commit_after>import omas, os
if 'USER' not in os.environ:
os.environ['USER'] = 'TEST_CONDA_USER'
if 'HOME' not in os.environ:
os.environ['HOME'] = '/tmp'
omas.test_omas_suite()
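
The same guard can be written with os.environ.setdefault, which only assigns when the variable is absent; a minimal equivalent sketch (standard library only, not part of the commit):

import os
os.environ.setdefault('USER', 'TEST_CONDA_USER')
os.environ.setdefault('HOME', '/tmp')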
|
b1adfe977ae272546d5bb10f2a4472cc3ae17667
|
examples/glyphs/data_tables_server.py
|
examples/glyphs/data_tables_server.py
|
from __future__ import print_function
from bokeh.objects import ColumnDataSource
from bokeh.widgetobjects import TableColumn, HandsonTable, PivotTable, HBox
from bokeh.session import PlotServerSession
from bokeh.sampledata.autompg import autompg
source = ColumnDataSource(autompg)
fields = zip(autompg.columns, map(str, autompg.dtypes))
columns = [ TableColumn(data=column, type="string" if dtype == "object" else "numeric", header=column) for column, dtype in fields ]
data_table = HandsonTable(source=source, columns=columns)
pivot_table = PivotTable(source=source, fields=[ dict(name=field, dtype=dtype) for field, dtype in fields ])
hbox = HBox(children=[data_table, pivot_table])
try:
session = PlotServerSession(serverloc="http://localhost:5006", username="defaultuser", userapikey="nokey")
except requests.exceptions.ConnectionError:
print("ERROR: This example requires the plot server. Please make sure plot server is running, by executing 'bokeh-server'")
sys.exit(1)
session.use_doc('data_tables_server')
session.add_plot(hbox)
session.store_all()
if __name__ == "__main__":
print("\nPlease visit http://localhost:5006/bokeh to see the plots")
|
Add simple example showing HandsonTable and PivotTable
|
Add simple example showing HandsonTable and PivotTable
|
Python
|
bsd-3-clause
|
PythonCharmers/bokeh,evidation-health/bokeh,maxalbert/bokeh,ChinaQuants/bokeh,rothnic/bokeh,percyfal/bokeh,CrazyGuo/bokeh,lukebarnard1/bokeh,timothydmorton/bokeh,aiguofer/bokeh,deeplook/bokeh,timsnyder/bokeh,draperjames/bokeh,paultcochrane/bokeh,muku42/bokeh,tacaswell/bokeh,xguse/bokeh,mindriot101/bokeh,bokeh/bokeh,alan-unravel/bokeh,aavanian/bokeh,mutirri/bokeh,lukebarnard1/bokeh,rhiever/bokeh,ericdill/bokeh,paultcochrane/bokeh,clairetang6/bokeh,dennisobrien/bokeh,carlvlewis/bokeh,birdsarah/bokeh,phobson/bokeh,htygithub/bokeh,stuart-knock/bokeh,bsipocz/bokeh,tacaswell/bokeh,sahat/bokeh,matbra/bokeh,eteq/bokeh,rs2/bokeh,jakirkham/bokeh,paultcochrane/bokeh,evidation-health/bokeh,CrazyGuo/bokeh,philippjfr/bokeh,timothydmorton/bokeh,almarklein/bokeh,deeplook/bokeh,satishgoda/bokeh,abele/bokeh,jakirkham/bokeh,mutirri/bokeh,aiguofer/bokeh,matbra/bokeh,carlvlewis/bokeh,PythonCharmers/bokeh,srinathv/bokeh,roxyboy/bokeh,justacec/bokeh,laurent-george/bokeh,ptitjano/bokeh,ChristosChristofidis/bokeh,bsipocz/bokeh,KasperPRasmussen/bokeh,almarklein/bokeh,phobson/bokeh,carlvlewis/bokeh,aavanian/bokeh,rothnic/bokeh,DuCorey/bokeh,gpfreitas/bokeh,daodaoliang/bokeh,matbra/bokeh,caseyclements/bokeh,sahat/bokeh,schoolie/bokeh,roxyboy/bokeh,bokeh/bokeh,caseyclements/bokeh,percyfal/bokeh,roxyboy/bokeh,canavandl/bokeh,ericdill/bokeh,gpfreitas/bokeh,maxalbert/bokeh,ptitjano/bokeh,ptitjano/bokeh,schoolie/bokeh,satishgoda/bokeh,bokeh/bokeh,CrazyGuo/bokeh,dennisobrien/bokeh,bsipocz/bokeh,DuCorey/bokeh,stonebig/bokeh,aiguofer/bokeh,birdsarah/bokeh,birdsarah/bokeh,aavanian/bokeh,ahmadia/bokeh,timsnyder/bokeh,jakirkham/bokeh,evidation-health/bokeh,alan-unravel/bokeh,almarklein/bokeh,mutirri/bokeh,tacaswell/bokeh,ChinaQuants/bokeh,clairetang6/bokeh,canavandl/bokeh,ericdill/bokeh,saifrahmed/bokeh,DuCorey/bokeh,justacec/bokeh,muku42/bokeh,daodaoliang/bokeh,saifrahmed/bokeh,KasperPRasmussen/bokeh,azjps/bokeh,aavanian/bokeh,awanke/bokeh,timothydmorton/bokeh,ptitjano/bokeh,jplourenco/bokeh,mindriot101/bokeh,awanke/bokeh,Karel-van-de-Plassche/bokeh,rs2/bokeh,josherick/bokeh,awanke/bokeh,josherick/bokeh,alan-unravel/bokeh,daodaoliang/bokeh,muku42/bokeh,Karel-van-de-Plassche/bokeh,akloster/bokeh,timothydmorton/bokeh,stuart-knock/bokeh,ChristosChristofidis/bokeh,DuCorey/bokeh,Karel-van-de-Plassche/bokeh,srinathv/bokeh,josherick/bokeh,phobson/bokeh,ChinaQuants/bokeh,bsipocz/bokeh,canavandl/bokeh,deeplook/bokeh,stonebig/bokeh,awanke/bokeh,lukebarnard1/bokeh,evidation-health/bokeh,paultcochrane/bokeh,eteq/bokeh,azjps/bokeh,timsnyder/bokeh,dennisobrien/bokeh,schoolie/bokeh,maxalbert/bokeh,phobson/bokeh,khkaminska/bokeh,canavandl/bokeh,azjps/bokeh,roxyboy/bokeh,azjps/bokeh,draperjames/bokeh,philippjfr/bokeh,PythonCharmers/bokeh,ChristosChristofidis/bokeh,KasperPRasmussen/bokeh,KasperPRasmussen/bokeh,carlvlewis/bokeh,stuart-knock/bokeh,clairetang6/bokeh,ericmjl/bokeh,mindriot101/bokeh,draperjames/bokeh,dennisobrien/bokeh,htygithub/bokeh,jakirkham/bokeh,CrazyGuo/bokeh,josherick/bokeh,tacaswell/bokeh,htygithub/bokeh,justacec/bokeh,jplourenco/bokeh,laurent-george/bokeh,justacec/bokeh,stuart-knock/bokeh,philippjfr/bokeh,rhiever/bokeh,laurent-george/bokeh,rhiever/bokeh,rhiever/bokeh,matbra/bokeh,rs2/bokeh,ahmadia/bokeh,daodaoliang/bokeh,mutirri/bokeh,rs2/bokeh,schoolie/bokeh,jakirkham/bokeh,ericdill/bokeh,akloster/bokeh,phobson/bokeh,saifrahmed/bokeh,caseyclements/bokeh,laurent-george/bokeh,philippjfr/bokeh,srinathv/bokeh,percyfal/bokeh,muku42/bokeh,caseyclements/bokeh,timsnyder/bokeh,alan-unravel/bokeh,lukebarnard1/bokeh,ericmjl/bokeh,msar
ahan/bokeh,stonebig/bokeh,bokeh/bokeh,quasiben/bokeh,azjps/bokeh,percyfal/bokeh,srinathv/bokeh,quasiben/bokeh,abele/bokeh,msarahan/bokeh,schoolie/bokeh,gpfreitas/bokeh,sahat/bokeh,ahmadia/bokeh,philippjfr/bokeh,stonebig/bokeh,saifrahmed/bokeh,gpfreitas/bokeh,KasperPRasmussen/bokeh,msarahan/bokeh,xguse/bokeh,deeplook/bokeh,PythonCharmers/bokeh,rs2/bokeh,ericmjl/bokeh,xguse/bokeh,dennisobrien/bokeh,aiguofer/bokeh,abele/bokeh,ChinaQuants/bokeh,ChristosChristofidis/bokeh,jplourenco/bokeh,eteq/bokeh,mindriot101/bokeh,khkaminska/bokeh,bokeh/bokeh,htygithub/bokeh,rothnic/bokeh,jplourenco/bokeh,satishgoda/bokeh,ericmjl/bokeh,msarahan/bokeh,draperjames/bokeh,ericmjl/bokeh,timsnyder/bokeh,ptitjano/bokeh,khkaminska/bokeh,eteq/bokeh,aiguofer/bokeh,draperjames/bokeh,satishgoda/bokeh,khkaminska/bokeh,akloster/bokeh,rothnic/bokeh,xguse/bokeh,DuCorey/bokeh,maxalbert/bokeh,aavanian/bokeh,abele/bokeh,akloster/bokeh,quasiben/bokeh,Karel-van-de-Plassche/bokeh,percyfal/bokeh,birdsarah/bokeh,ahmadia/bokeh,Karel-van-de-Plassche/bokeh,clairetang6/bokeh
|
Add simple example showing HandsonTable and PivotTable
|
from __future__ import print_function
from bokeh.objects import ColumnDataSource
from bokeh.widgetobjects import TableColumn, HandsonTable, PivotTable, HBox
from bokeh.session import PlotServerSession
from bokeh.sampledata.autompg import autompg
source = ColumnDataSource(autompg)
fields = zip(autompg.columns, map(str, autompg.dtypes))
columns = [ TableColumn(data=column, type="string" if dtype == "object" else "numeric", header=column) for column, dtype in fields ]
data_table = HandsonTable(source=source, columns=columns)
pivot_table = PivotTable(source=source, fields=[ dict(name=field, dtype=dtype) for field, dtype in fields ])
hbox = HBox(children=[data_table, pivot_table])
try:
session = PlotServerSession(serverloc="http://localhost:5006", username="defaultuser", userapikey="nokey")
except requests.exceptions.ConnectionError:
print("ERROR: This example requires the plot server. Please make sure plot server is running, by executing 'bokeh-server'")
sys.exit(1)
session.use_doc('data_tables_server')
session.add_plot(hbox)
session.store_all()
if __name__ == "__main__":
print("\nPlease visit http://localhost:5006/bokeh to see the plots")
|
<commit_before><commit_msg>Add simple example showing HandsonTable and PivotTable<commit_after>
|
from __future__ import print_function
from bokeh.objects import ColumnDataSource
from bokeh.widgetobjects import TableColumn, HandsonTable, PivotTable, HBox
from bokeh.session import PlotServerSession
from bokeh.sampledata.autompg import autompg
source = ColumnDataSource(autompg)
fields = zip(autompg.columns, map(str, autompg.dtypes))
columns = [ TableColumn(data=column, type="string" if dtype == "object" else "numeric", header=column) for column, dtype in fields ]
data_table = HandsonTable(source=source, columns=columns)
pivot_table = PivotTable(source=source, fields=[ dict(name=field, dtype=dtype) for field, dtype in fields ])
hbox = HBox(children=[data_table, pivot_table])
try:
session = PlotServerSession(serverloc="http://localhost:5006", username="defaultuser", userapikey="nokey")
except requests.exceptions.ConnectionError:
print("ERROR: This example requires the plot server. Please make sure plot server is running, by executing 'bokeh-server'")
sys.exit(1)
session.use_doc('data_tables_server')
session.add_plot(hbox)
session.store_all()
if __name__ == "__main__":
print("\nPlease visit http://localhost:5006/bokeh to see the plots")
|
Add simple example showing HandsonTable and PivotTablefrom __future__ import print_function
from bokeh.objects import ColumnDataSource
from bokeh.widgetobjects import TableColumn, HandsonTable, PivotTable, HBox
from bokeh.session import PlotServerSession
from bokeh.sampledata.autompg import autompg
source = ColumnDataSource(autompg)
fields = zip(autompg.columns, map(str, autompg.dtypes))
columns = [ TableColumn(data=column, type="string" if dtype == "object" else "numeric", header=column) for column, dtype in fields ]
data_table = HandsonTable(source=source, columns=columns)
pivot_table = PivotTable(source=source, fields=[ dict(name=field, dtype=dtype) for field, dtype in fields ])
hbox = HBox(children=[data_table, pivot_table])
try:
session = PlotServerSession(serverloc="http://localhost:5006", username="defaultuser", userapikey="nokey")
except requests.exceptions.ConnectionError:
print("ERROR: This example requires the plot server. Please make sure plot server is running, by executing 'bokeh-server'")
sys.exit(1)
session.use_doc('data_tables_server')
session.add_plot(hbox)
session.store_all()
if __name__ == "__main__":
print("\nPlease visit http://localhost:5006/bokeh to see the plots")
|
<commit_before><commit_msg>Add simple example showing HandsonTable and PivotTable<commit_after>from __future__ import print_function
from bokeh.objects import ColumnDataSource
from bokeh.widgetobjects import TableColumn, HandsonTable, PivotTable, HBox
from bokeh.session import PlotServerSession
from bokeh.sampledata.autompg import autompg
source = ColumnDataSource(autompg)
fields = zip(autompg.columns, map(str, autompg.dtypes))
columns = [ TableColumn(data=column, type="string" if dtype == "object" else "numeric", header=column) for column, dtype in fields ]
data_table = HandsonTable(source=source, columns=columns)
pivot_table = PivotTable(source=source, fields=[ dict(name=field, dtype=dtype) for field, dtype in fields ])
hbox = HBox(children=[data_table, pivot_table])
try:
session = PlotServerSession(serverloc="http://localhost:5006", username="defaultuser", userapikey="nokey")
except requests.exceptions.ConnectionError:
print("ERROR: This example requires the plot server. Please make sure plot server is running, by executing 'bokeh-server'")
sys.exit(1)
session.use_doc('data_tables_server')
session.add_plot(hbox)
session.store_all()
if __name__ == "__main__":
print("\nPlease visit http://localhost:5006/bokeh to see the plots")
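
The column list is derived from the frame's dtypes: object columns become "string" table columns and everything else "numeric". A standalone sketch of that mapping on a hand-made frame; pandas is assumed available (as it is for the autompg sample data) and the frame contents are invented for illustration:

from __future__ import print_function
import pandas as pd

df = pd.DataFrame({"name": ["ford", "audi"], "mpg": [21.0, 30.5]})
for column, dtype in zip(df.columns, map(str, df.dtypes)):
    # Same rule as above: non-object dtypes are shown as numeric columns.
    print(column, dtype, "string" if dtype == "object" else "numeric")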
|
|
0e40eeb87504c896917fcbd9cc718c9953fed94f
|
finding-geodesic-basins-with-scipy.py
|
finding-geodesic-basins-with-scipy.py
|
# IPython log file
from skimage import graph
image = np.array([[1, 1, 2, 2], [2, 1, 1, 3], [3, 2, 1, 2], [2, 2, 2, 1]])
mcp = graph.MCP_Geometric(image)
destinations = [[0, 0], [3, 3]]
costs, traceback = mcp.find_costs(destinations)
type(traceback)
print(traceback)
graph.
mcp.traceback
get_ipython().run_line_magic('pinfo', 'mcp.traceback')
mcp.traceback([0, 0])
mcp.traceback([0, 3])
mcp.offsets
mcp
from skimage.graph import _mcp
offsets = _mcp.make_offsets(2, True)
offsets
traceback
indices = np.indices(traceback.shape)
offsets.append([0, 0])
indices.shape
offsets_arr = np.array(offsets)
offsets_arr.shape
offset_to_neighbor = offsets_arr[traceback]
offset_to_neighbor.shape
neighbor_index = indices + offset_to_neighbor.transpose((2, 0, 1))
neighbor_index = indices - offset_to_neighbor.transpose((2, 0, 1))
get_ipython().run_line_magic('pinfo', 'np.ravel_multi_index')
ids = np.arange(traceback.size).reshape(image.shape)
neighbor_ids = np.ravel_multi_index(tuple(neighbor_index), traceback.shape)
ids
neighbor_ids
from scipy import sparse
g = sparse.coo_matrix((ids.ravel(), neighbor_ids.ravel(), np.ones(traceback.size))).to_csr()
get_ipython().run_line_magic('pinfo', 'sparse.coo_matrix')
g = sparse.coo_matrix((np.ones(traceback.size), (ids.ravel(), neighbor_ids.ravel()))).to_csr()
g = sparse.coo_matrix((np.ones(traceback.size), (ids.ravel(), neighbor_ids.ravel()))).tocsr()
get_ipython().run_line_magic('pinfo', 'sparse.csgraph.connected_components')
sparse.csgraph.connected_components(g)[1].reshape((4, 4))
neighbor_ids
ids
print(indices.shape)
print(ids)
print(neighbor_ids)
print _39
print(_39)
print(costs)
g2 = sparse.coo_matrix((np.ones(traceback.size), (ids.flat, neighbor_ids.flat))).tocsr()
g2.shape
sparse.csgraph.connected_components(g2)[1].reshape((4, 4))
|
Add raw history from geodesic basins
|
Add raw history from geodesic basins
|
Python
|
bsd-3-clause
|
jni/useful-histories
|
Add raw history from geodesic basins
|
# IPython log file
from skimage import graph
image = np.array([[1, 1, 2, 2], [2, 1, 1, 3], [3, 2, 1, 2], [2, 2, 2, 1]])
mcp = graph.MCP_Geometric(image)
destinations = [[0, 0], [3, 3]]
costs, traceback = mcp.find_costs(destinations)
type(traceback)
print(traceback)
graph.
mcp.traceback
get_ipython().run_line_magic('pinfo', 'mcp.traceback')
mcp.traceback([0, 0])
mcp.traceback([0, 3])
mcp.offsets
mcp
from skimage.graph import _mcp
offsets = _mcp.make_offsets(2, True)
offsets
traceback
indices = np.indices(traceback.shape)
offsets.append([0, 0])
indices.shape
offsets_arr = np.array(offsets)
offsets_arr.shape
offset_to_neighbor = offsets_arr[traceback]
offset_to_neighbor.shape
neighbor_index = indices + offset_to_neighbor.transpose((2, 0, 1))
neighbor_index = indices - offset_to_neighbor.transpose((2, 0, 1))
get_ipython().run_line_magic('pinfo', 'np.ravel_multi_index')
ids = np.arange(traceback.size).reshape(image.shape)
neighbor_ids = np.ravel_multi_index(tuple(neighbor_index), traceback.shape)
ids
neighbor_ids
from scipy import sparse
g = sparse.coo_matrix((ids.ravel(), neighbor_ids.ravel(), np.ones(traceback.size))).to_csr()
get_ipython().run_line_magic('pinfo', 'sparse.coo_matrix')
g = sparse.coo_matrix((np.ones(traceback.size), (ids.ravel(), neighbor_ids.ravel()))).to_csr()
g = sparse.coo_matrix((np.ones(traceback.size), (ids.ravel(), neighbor_ids.ravel()))).tocsr()
get_ipython().run_line_magic('pinfo', 'sparse.csgraph.connected_components')
sparse.csgraph.connected_components(g)[1].reshape((4, 4))
neighbor_ids
ids
print(indices.shape)
print(ids)
print(neighbor_ids)
print _39
print(_39)
print(costs)
g2 = sparse.coo_matrix((np.ones(traceback.size), (ids.flat, neighbor_ids.flat))).tocsr()
g2.shape
sparse.csgraph.connected_components(g2)[1].reshape((4, 4))
|
<commit_before><commit_msg>Add raw history from geodesic basins<commit_after>
|
# IPython log file
from skimage import graph
image = np.array([[1, 1, 2, 2], [2, 1, 1, 3], [3, 2, 1, 2], [2, 2, 2, 1]])
mcp = graph.MCP_Geometric(image)
destinations = [[0, 0], [3, 3]]
costs, traceback = mcp.find_costs(destinations)
type(traceback)
print(traceback)
graph.
mcp.traceback
get_ipython().run_line_magic('pinfo', 'mcp.traceback')
mcp.traceback([0, 0])
mcp.traceback([0, 3])
mcp.offsets
mcp
from skimage.graph import _mcp
offsets = _mcp.make_offsets(2, True)
offsets
traceback
indices = np.indices(traceback.shape)
offsets.append([0, 0])
indices.shape
offsets_arr = np.array(offsets)
offsets_arr.shape
offset_to_neighbor = offsets_arr[traceback]
offset_to_neighbor.shape
neighbor_index = indices + offset_to_neighbor.transpose((2, 0, 1))
neighbor_index = indices - offset_to_neighbor.transpose((2, 0, 1))
get_ipython().run_line_magic('pinfo', 'np.ravel_multi_index')
ids = np.arange(traceback.size).reshape(image.shape)
neighbor_ids = np.ravel_multi_index(tuple(neighbor_index), traceback.shape)
ids
neighbor_ids
from scipy import sparse
g = sparse.coo_matrix((ids.ravel(), neighbor_ids.ravel(), np.ones(traceback.size))).to_csr()
get_ipython().run_line_magic('pinfo', 'sparse.coo_matrix')
g = sparse.coo_matrix((np.ones(traceback.size), (ids.ravel(), neighbor_ids.ravel()))).to_csr()
g = sparse.coo_matrix((np.ones(traceback.size), (ids.ravel(), neighbor_ids.ravel()))).tocsr()
get_ipython().run_line_magic('pinfo', 'sparse.csgraph.connected_components')
sparse.csgraph.connected_components(g)[1].reshape((4, 4))
neighbor_ids
ids
print(indices.shape)
print(ids)
print(neighbor_ids)
print _39
print(_39)
print(costs)
g2 = sparse.coo_matrix((np.ones(traceback.size), (ids.flat, neighbor_ids.flat))).tocsr()
g2.shape
sparse.csgraph.connected_components(g2)[1].reshape((4, 4))
|
Add raw history from geodesic basins# IPython log file
from skimage import graph
image = np.array([[1, 1, 2, 2], [2, 1, 1, 3], [3, 2, 1, 2], [2, 2, 2, 1]])
mcp = graph.MCP_Geometric(image)
destinations = [[0, 0], [3, 3]]
costs, traceback = mcp.find_costs(destinations)
type(traceback)
print(traceback)
graph.
mcp.traceback
get_ipython().run_line_magic('pinfo', 'mcp.traceback')
mcp.traceback([0, 0])
mcp.traceback([0, 3])
mcp.offsets
mcp
from skimage.graph import _mcp
offsets = _mcp.make_offsets(2, True)
offsets
traceback
indices = np.indices(traceback.shape)
offsets.append([0, 0])
indices.shape
offsets_arr = np.array(offsets)
offsets_arr.shape
offset_to_neighbor = offsets_arr[traceback]
offset_to_neighbor.shape
neighbor_index = indices + offset_to_neighbor.transpose((2, 0, 1))
neighbor_index = indices - offset_to_neighbor.transpose((2, 0, 1))
get_ipython().run_line_magic('pinfo', 'np.ravel_multi_index')
ids = np.arange(traceback.size).reshape(image.shape)
neighbor_ids = np.ravel_multi_index(tuple(neighbor_index), traceback.shape)
ids
neighbor_ids
from scipy import sparse
g = sparse.coo_matrix((ids.ravel(), neighbor_ids.ravel(), np.ones(traceback.size))).to_csr()
get_ipython().run_line_magic('pinfo', 'sparse.coo_matrix')
g = sparse.coo_matrix((np.ones(traceback.size), (ids.ravel(), neighbor_ids.ravel()))).to_csr()
g = sparse.coo_matrix((np.ones(traceback.size), (ids.ravel(), neighbor_ids.ravel()))).tocsr()
get_ipython().run_line_magic('pinfo', 'sparse.csgraph.connected_components')
sparse.csgraph.connected_components(g)[1].reshape((4, 4))
neighbor_ids
ids
print(indices.shape)
print(ids)
print(neighbor_ids)
print _39
print(_39)
print(costs)
g2 = sparse.coo_matrix((np.ones(traceback.size), (ids.flat, neighbor_ids.flat))).tocsr()
g2.shape
sparse.csgraph.connected_components(g2)[1].reshape((4, 4))
|
<commit_before><commit_msg>Add raw history from geodesic basins<commit_after># IPython log file
from skimage import graph
image = np.array([[1, 1, 2, 2], [2, 1, 1, 3], [3, 2, 1, 2], [2, 2, 2, 1]])
mcp = graph.MCP_Geometric(image)
destinations = [[0, 0], [3, 3]]
costs, traceback = mcp.find_costs(destinations)
type(traceback)
print(traceback)
graph.
mcp.traceback
get_ipython().run_line_magic('pinfo', 'mcp.traceback')
mcp.traceback([0, 0])
mcp.traceback([0, 3])
mcp.offsets
mcp
from skimage.graph import _mcp
offsets = _mcp.make_offsets(2, True)
offsets
traceback
indices = np.indices(traceback.shape)
offsets.append([0, 0])
indices.shape
offsets_arr = np.array(offsets)
offsets_arr.shape
offset_to_neighbor = offsets_arr[traceback]
offset_to_neighbor.shape
neighbor_index = indices + offset_to_neighbor.transpose((2, 0, 1))
neighbor_index = indices - offset_to_neighbor.transpose((2, 0, 1))
get_ipython().run_line_magic('pinfo', 'np.ravel_multi_index')
ids = np.arange(traceback.size).reshape(image.shape)
neighbor_ids = np.ravel_multi_index(tuple(neighbor_index), traceback.shape)
ids
neighbor_ids
from scipy import sparse
g = sparse.coo_matrix((ids.ravel(), neighbor_ids.ravel(), np.ones(traceback.size))).to_csr()
get_ipython().run_line_magic('pinfo', 'sparse.coo_matrix')
g = sparse.coo_matrix((np.ones(traceback.size), (ids.ravel(), neighbor_ids.ravel()))).to_csr()
g = sparse.coo_matrix((np.ones(traceback.size), (ids.ravel(), neighbor_ids.ravel()))).tocsr()
get_ipython().run_line_magic('pinfo', 'sparse.csgraph.connected_components')
sparse.csgraph.connected_components(g)[1].reshape((4, 4))
neighbor_ids
ids
print(indices.shape)
print(ids)
print(neighbor_ids)
print _39
print(_39)
print(costs)
g2 = sparse.coo_matrix((np.ones(traceback.size), (ids.flat, neighbor_ids.flat))).tocsr()
g2.shape
sparse.csgraph.connected_components(g2)[1].reshape((4, 4))
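
Distilled from the raw history above, the pipeline it converges on: follow each pixel's traceback offset to the neighbour it was reached from, build a sparse pixel-to-neighbour graph, and label basins as its connected components. The toy image and every call are taken from the log; only the variable names and the explicit csgraph import are editorial:

import numpy as np
from scipy import sparse
from scipy.sparse import csgraph
from skimage import graph
from skimage.graph import _mcp

image = np.array([[1, 1, 2, 2], [2, 1, 1, 3], [3, 2, 1, 2], [2, 2, 2, 1]])
mcp = graph.MCP_Geometric(image)
costs, traceback = mcp.find_costs([[0, 0], [3, 3]])

offsets = _mcp.make_offsets(2, True)
offsets.append([0, 0])                  # index -1 (the seed points) maps to a zero offset
offsets_arr = np.array(offsets)

indices = np.indices(traceback.shape)
neighbor_index = indices - offsets_arr[traceback].transpose((2, 0, 1))
ids = np.arange(traceback.size).reshape(image.shape)
neighbor_ids = np.ravel_multi_index(tuple(neighbor_index), traceback.shape)

# Each pixel points at the neighbour it was reached from; connected components
# of this graph give one basin per destination.
g = sparse.coo_matrix((np.ones(traceback.size), (ids.flat, neighbor_ids.flat))).tocsr()
basins = csgraph.connected_components(g)[1].reshape(image.shape)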
|
|
faf4d4d4a80456caf39fbc547763f80bee50acee
|
webkit/tools/layout_tests/test_types/fuzzy_image_diff.py
|
webkit/tools/layout_tests/test_types/fuzzy_image_diff.py
|
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compares the image output of a test to the expected image output using
fuzzy matching.
"""
import errno
import logging
import os
import shutil
import subprocess
from layout_package import path_utils
from layout_package import test_failures
from test_types import test_type_base
class FuzzyImageDiff(test_type_base.TestTypeBase):
def CompareOutput(self, filename, proc, output, test_args):
"""Implementation of CompareOutput that checks the output image and
checksum against the expected files from the LayoutTest directory.
"""
failures = []
# If we didn't produce a hash file, this test must be text-only.
if test_args.hash is None:
return failures
expected_png_file = path_utils.ExpectedFilename(filename,
'.png',
self._platform)
if test_args.show_sources:
logging.debug('Using %s' % expected_png_file)
# Also report a missing expected PNG file.
if not os.path.isfile(expected_png_file):
failures.append(test_failures.FailureMissingImage(self))
# Run the fuzzymatcher
r = subprocess.call(['fuzzymatch', test_args.png_path, expected_png_file])
print ' fuzzymatch returned', r
if r != 0:
failures.append(test_failures.FailureFuzzyFailure(self))
return failures
|
Fix build: missed a file
|
Fix build: missed a file
TBR=tony
Review URL: http://codereview.chromium.org/13209
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@6449 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
junmin-zhu/chromium-rivertrail,jaruba/chromium.src,rogerwang/chromium,ChromiumWebApps/chromium,dednal/chromium.src,pozdnyakov/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,robclark/chromium,mogoweb/chromium-crosswalk,dushu1203/chromium.src,anirudhSK/chromium,jaruba/chromium.src,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,pozdnyakov/chromium-crosswalk,bright-sparks/chromium-spacewalk,zcbenz/cefode-chromium,robclark/chromium,patrickm/chromium.src,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,robclark/chromium,zcbenz/cefode-chromium,markYoungH/chromium.src,patrickm/chromium.src,M4sse/chromium.src,Pluto-tv/chromium-crosswalk,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,robclark/chromium,Jonekee/chromium.src,axinging/chromium-crosswalk,dushu1203/chromium.src,nacl-webkit/chrome_deps,zcbenz/cefode-chromium,PeterWangIntel/chromium-crosswalk,markYoungH/chromium.src,dednal/chromium.src,nacl-webkit/chrome_deps,Chilledheart/chromium,mogoweb/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hujiajie/pa-chromium,junmin-zhu/chromium-rivertrail,hujiajie/pa-chromium,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,nacl-webkit/chrome_deps,keishi/chromium,Chilledheart/chromium,mogoweb/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,krieger-od/nwjs_chromium.src,pozdnyakov/chromium-crosswalk,timopulkkinen/BubbleFish,hujiajie/pa-chromium,mogoweb/chromium-crosswalk,fujunwei/chromium-crosswalk,keishi/chromium,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,zcbenz/cefode-chromium,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,pozdnyakov/chromium-crosswalk,dednal/chromium.src,robclark/chromium,keishi/chromium,anirudhSK/chromium,Chilledheart/chromium,rogerwang/chromium,ChromiumWebApps/chromium,patrickm/chromium.src,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,ltilve/chromium,bright-sparks/chromium-spacewalk,littlstar/chromium.src,Fireblend/chromium-crosswalk,littlstar/chromium.src,ltilve/chromium,axinging/chromium-crosswalk,M4sse/chromium.src,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,junmin-zhu/chromium-rivertrail,dednal/chromium.src,rogerwang/chromium,hgl888/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,pozdnyakov/chromium-crosswalk,junmin-zhu/chromium-rivertrail,Just-D/chromium-1,Just-D/chromium-1,pozdnyakov/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,jaruba/chromium.src,Chilledheart/chromium,axinging/chromium-crosswalk,ltilve/chromium,krieger-od/nwjs_chromium.src,hujiajie/pa-chromium,Chilledheart/chromium,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,ltilve/chromium,timopulkkinen/BubbleFish,ChromiumWebApps/chromium,junmin-zhu/chromium-rivertrail,fujunwei/chromium-crosswalk,dushu1203/chromium.src,rogerwang/chromium,hgl888/chromium-crosswalk,ondra-novak/chromium.src,ltilve/chromium,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,junmin-zhu/chromium-rivertrail,Just-D/chromium-1,dednal/chromium.src,robclark/chromium,nacl-webkit/chrome_deps,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,Jonekee/chromium.src,anirudhSK/chromium,mogoweb/chromium-cros
swalk,keishi/chromium,Jonekee/chromium.src,dednal/chromium.src,Jonekee/chromium.src,timopulkkinen/BubbleFish,mogoweb/chromium-crosswalk,anirudhSK/chromium,timopulkkinen/BubbleFish,M4sse/chromium.src,jaruba/chromium.src,pozdnyakov/chromium-crosswalk,Pluto-tv/chromium-crosswalk,zcbenz/cefode-chromium,pozdnyakov/chromium-crosswalk,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,keishi/chromium,ondra-novak/chromium.src,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,zcbenz/cefode-chromium,patrickm/chromium.src,M4sse/chromium.src,chuan9/chromium-crosswalk,ondra-novak/chromium.src,chuan9/chromium-crosswalk,M4sse/chromium.src,dednal/chromium.src,keishi/chromium,ltilve/chromium,hujiajie/pa-chromium,Chilledheart/chromium,ondra-novak/chromium.src,hgl888/chromium-crosswalk,timopulkkinen/BubbleFish,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,ondra-novak/chromium.src,dushu1203/chromium.src,zcbenz/cefode-chromium,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,keishi/chromium,littlstar/chromium.src,robclark/chromium,rogerwang/chromium,bright-sparks/chromium-spacewalk,junmin-zhu/chromium-rivertrail,littlstar/chromium.src,anirudhSK/chromium,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,jaruba/chromium.src,zcbenz/cefode-chromium,ltilve/chromium,littlstar/chromium.src,dushu1203/chromium.src,chuan9/chromium-crosswalk,jaruba/chromium.src,anirudhSK/chromium,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,dednal/chromium.src,jaruba/chromium.src,nacl-webkit/chrome_deps,patrickm/chromium.src,ChromiumWebApps/chromium,hujiajie/pa-chromium,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,rogerwang/chromium,axinging/chromium-crosswalk,zcbenz/cefode-chromium,timopulkkinen/BubbleFish,timopulkkinen/BubbleFish,ltilve/chromium,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,junmin-zhu/chromium-rivertrail,zcbenz/cefode-chromium,ChromiumWebApps/chromium,Jonekee/chromium.src,anirudhSK/chromium,crosswalk-project/chromium-crosswalk-efl,junmin-zhu/chromium-rivertrail,Just-D/chromium-1,patrickm/chromium.src,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,keishi/chromium,anirudhSK/chromium,Fireblend/chromium-crosswalk,rogerwang/chromium,markYoungH/chromium.src,patrickm/chromium.src,rogerwang/chromium,markYoungH/chromium.src,Chilledheart/chromium,M4sse/chromium.src,nacl-webkit/chrome_deps,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,pozdnyakov/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,timopulkkinen/BubbleFish,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,keishi/chromium,zcbenz/cefode-chromium,dushu1203/chromium.src,ondra-novak/chromium.src,ondra-novak/chromium.src,Jonekee/chromium.src,M4sse/chromium.src,timopulkkinen/BubbleFish,robclark/chromium,anirudhSK/chromium,Chilledheart/chromium,anirudhSK/chromium,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,robclark/chromium,TheTypoMaster/chromium-crosswalk,nacl-webkit/chrome_deps,junmin-zhu/chromium-rivertrail,dednal/chromium.src,bright-sparks/chromium-spacewalk,
ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,pozdnyakov/chromium-crosswalk,axinging/chromium-crosswalk,hujiajie/pa-chromium,hujiajie/pa-chromium,krieger-od/nwjs_chromium.src,keishi/chromium,bright-sparks/chromium-spacewalk,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,anirudhSK/chromium,krieger-od/nwjs_chromium.src,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,timopulkkinen/BubbleFish,Fireblend/chromium-crosswalk,rogerwang/chromium,M4sse/chromium.src,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,Jonekee/chromium.src,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,timopulkkinen/BubbleFish,anirudhSK/chromium,nacl-webkit/chrome_deps,fujunwei/chromium-crosswalk,nacl-webkit/chrome_deps,markYoungH/chromium.src,M4sse/chromium.src,ltilve/chromium,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,markYoungH/chromium.src,ChromiumWebApps/chromium,jaruba/chromium.src,mogoweb/chromium-crosswalk,markYoungH/chromium.src,markYoungH/chromium.src,hujiajie/pa-chromium,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,nacl-webkit/chrome_deps,pozdnyakov/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,mogoweb/chromium-crosswalk,ChromiumWebApps/chromium,dednal/chromium.src,ondra-novak/chromium.src,Just-D/chromium-1,chuan9/chromium-crosswalk,mogoweb/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,keishi/chromium,hujiajie/pa-chromium,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,hujiajie/pa-chromium,chuan9/chromium-crosswalk,robclark/chromium,Fireblend/chromium-crosswalk,rogerwang/chromium,junmin-zhu/chromium-rivertrail,TheTypoMaster/chromium-crosswalk
|
Fix build: missed a file
TBR=tony
Review URL: http://codereview.chromium.org/13209
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@6449 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compares the image output of a test to the expected image output using
fuzzy matching.
"""
import errno
import logging
import os
import shutil
import subprocess
from layout_package import path_utils
from layout_package import test_failures
from test_types import test_type_base
class FuzzyImageDiff(test_type_base.TestTypeBase):
def CompareOutput(self, filename, proc, output, test_args):
"""Implementation of CompareOutput that checks the output image and
checksum against the expected files from the LayoutTest directory.
"""
failures = []
# If we didn't produce a hash file, this test must be text-only.
if test_args.hash is None:
return failures
expected_png_file = path_utils.ExpectedFilename(filename,
'.png',
self._platform)
if test_args.show_sources:
logging.debug('Using %s' % expected_png_file)
# Also report a missing expected PNG file.
if not os.path.isfile(expected_png_file):
failures.append(test_failures.FailureMissingImage(self))
# Run the fuzzymatcher
r = subprocess.call(['fuzzymatch', test_args.png_path, expected_png_file])
print ' fuzzymatch returned', r
if r != 0:
failures.append(test_failures.FailureFuzzyFailure(self))
return failures
|
<commit_before><commit_msg>Fix build: missed a file
TBR=tony
Review URL: http://codereview.chromium.org/13209
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@6449 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compares the image output of a test to the expected image output using
fuzzy matching.
"""
import errno
import logging
import os
import shutil
import subprocess
from layout_package import path_utils
from layout_package import test_failures
from test_types import test_type_base
class FuzzyImageDiff(test_type_base.TestTypeBase):
def CompareOutput(self, filename, proc, output, test_args):
"""Implementation of CompareOutput that checks the output image and
checksum against the expected files from the LayoutTest directory.
"""
failures = []
# If we didn't produce a hash file, this test must be text-only.
if test_args.hash is None:
return failures
expected_png_file = path_utils.ExpectedFilename(filename,
'.png',
self._platform)
if test_args.show_sources:
logging.debug('Using %s' % expected_png_file)
# Also report a missing expected PNG file.
if not os.path.isfile(expected_png_file):
failures.append(test_failures.FailureMissingImage(self))
# Run the fuzzymatcher
r = subprocess.call(['fuzzymatch', test_args.png_path, expected_png_file])
print ' fuzzymatch returned', r
if r != 0:
failures.append(test_failures.FailureFuzzyFailure(self))
return failures
|
Fix build: missed a file
TBR=tony
Review URL: http://codereview.chromium.org/13209
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@6449 0039d316-1c4b-4281-b951-d872f2087c98# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compares the image output of a test to the expected image output using
fuzzy matching.
"""
import errno
import logging
import os
import shutil
import subprocess
from layout_package import path_utils
from layout_package import test_failures
from test_types import test_type_base
class FuzzyImageDiff(test_type_base.TestTypeBase):
def CompareOutput(self, filename, proc, output, test_args):
"""Implementation of CompareOutput that checks the output image and
checksum against the expected files from the LayoutTest directory.
"""
failures = []
# If we didn't produce a hash file, this test must be text-only.
if test_args.hash is None:
return failures
expected_png_file = path_utils.ExpectedFilename(filename,
'.png',
self._platform)
if test_args.show_sources:
logging.debug('Using %s' % expected_png_file)
# Also report a missing expected PNG file.
if not os.path.isfile(expected_png_file):
failures.append(test_failures.FailureMissingImage(self))
# Run the fuzzymatcher
r = subprocess.call(['fuzzymatch', test_args.png_path, expected_png_file])
print ' fuzzymatch returned', r
if r != 0:
failures.append(test_failures.FailureFuzzyFailure(self))
return failures
|
<commit_before><commit_msg>Fix build: missed a file
TBR=tony
Review URL: http://codereview.chromium.org/13209
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@6449 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compares the image output of a test to the expected image output using
fuzzy matching.
"""
import errno
import logging
import os
import shutil
import subprocess
from layout_package import path_utils
from layout_package import test_failures
from test_types import test_type_base
class FuzzyImageDiff(test_type_base.TestTypeBase):
def CompareOutput(self, filename, proc, output, test_args):
"""Implementation of CompareOutput that checks the output image and
checksum against the expected files from the LayoutTest directory.
"""
failures = []
# If we didn't produce a hash file, this test must be text-only.
if test_args.hash is None:
return failures
expected_png_file = path_utils.ExpectedFilename(filename,
'.png',
self._platform)
if test_args.show_sources:
logging.debug('Using %s' % expected_png_file)
# Also report a missing expected PNG file.
if not os.path.isfile(expected_png_file):
failures.append(test_failures.FailureMissingImage(self))
# Run the fuzzymatcher
r = subprocess.call(['fuzzymatch', test_args.png_path, expected_png_file])
print ' fuzzymatch returned', r
if r != 0:
failures.append(test_failures.FailureFuzzyFailure(self))
return failures
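
CompareOutput shells out to a fuzzymatch binary and treats any non-zero exit status as a fuzzy failure. A standalone sketch of that check; the binary name comes from the code above, while the PNG paths are placeholders:

import subprocess
r = subprocess.call(['fuzzymatch', 'actual.png', 'expected.png'])
if r != 0:
    print 'fuzzy mismatch, exit code', r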
|
|
09bc3c0714a01795ae832e860dda89c715c934df
|
lms/djangoapps/courseware/migrations/0009_auto_20190703_1955.py
|
lms/djangoapps/courseware/migrations/0009_auto_20190703_1955.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-07-03 19:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courseware', '0008_move_idde_to_edx_when'),
]
operations = [
migrations.AlterField(
model_name='studentmodulehistory',
name='student_module',
field=models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to='courseware.StudentModule'),
),
]
|
Add CSM drop FK migration
|
Add CSM drop FK migration
|
Python
|
agpl-3.0
|
cpennington/edx-platform,cpennington/edx-platform,edx-solutions/edx-platform,appsembler/edx-platform,msegado/edx-platform,EDUlib/edx-platform,eduNEXT/edx-platform,edx/edx-platform,stvstnfrd/edx-platform,mitocw/edx-platform,angelapper/edx-platform,ESOedX/edx-platform,appsembler/edx-platform,arbrandes/edx-platform,mitocw/edx-platform,ESOedX/edx-platform,edx/edx-platform,EDUlib/edx-platform,arbrandes/edx-platform,stvstnfrd/edx-platform,stvstnfrd/edx-platform,arbrandes/edx-platform,ESOedX/edx-platform,eduNEXT/edunext-platform,appsembler/edx-platform,eduNEXT/edunext-platform,stvstnfrd/edx-platform,arbrandes/edx-platform,msegado/edx-platform,angelapper/edx-platform,msegado/edx-platform,edx-solutions/edx-platform,msegado/edx-platform,eduNEXT/edx-platform,ESOedX/edx-platform,mitocw/edx-platform,edx/edx-platform,angelapper/edx-platform,eduNEXT/edx-platform,EDUlib/edx-platform,edx/edx-platform,appsembler/edx-platform,edx-solutions/edx-platform,angelapper/edx-platform,mitocw/edx-platform,eduNEXT/edx-platform,cpennington/edx-platform,edx-solutions/edx-platform,cpennington/edx-platform,eduNEXT/edunext-platform,eduNEXT/edunext-platform,EDUlib/edx-platform,msegado/edx-platform
|
Add CSM drop FK migration
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-07-03 19:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courseware', '0008_move_idde_to_edx_when'),
]
operations = [
migrations.AlterField(
model_name='studentmodulehistory',
name='student_module',
field=models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to='courseware.StudentModule'),
),
]
|
<commit_before><commit_msg>Add CSM drop FK migration<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-07-03 19:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courseware', '0008_move_idde_to_edx_when'),
]
operations = [
migrations.AlterField(
model_name='studentmodulehistory',
name='student_module',
field=models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to='courseware.StudentModule'),
),
]
|
Add CSM drop FK migration# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-07-03 19:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courseware', '0008_move_idde_to_edx_when'),
]
operations = [
migrations.AlterField(
model_name='studentmodulehistory',
name='student_module',
field=models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to='courseware.StudentModule'),
),
]
|
<commit_before><commit_msg>Add CSM drop FK migration<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-07-03 19:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courseware', '0008_move_idde_to_edx_when'),
]
operations = [
migrations.AlterField(
model_name='studentmodulehistory',
name='student_module',
field=models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to='courseware.StudentModule'),
),
]
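
The migration keeps the ORM-level relation but stops enforcing it in the database via db_constraint=False. A sketch of the model field this state corresponds to; only the field is shown and the rest of the model is assumed:

from django.db import models

class StudentModuleHistory(models.Model):
    student_module = models.ForeignKey(
        'courseware.StudentModule',
        on_delete=models.CASCADE,
        db_constraint=False,  # no FOREIGN KEY constraint is created in the database
    )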
|
|
4bec379b3cfea87f17b0d5fa5d80148afe87a470
|
examples/plot_pca.py
|
examples/plot_pca.py
|
"""
=========================================
PCA 2d projection of of Iris dataset
=========================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
"""
import pylab as pl
from scikits.learn import datasets
from scikits.learn.pca import PCA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_comp=2)
X_r = pca.fit(X).transform(X)
# Percentage of variance explained for each components
print pca.explained_variance_
pl.figure()
pl.hold('on')
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r[y==i,0], X_r[y==i,1], c=c, label=target_name)
pl.hold('off')
pl.legend()
pl.title('PCA of IRIS dataset')
pl.show()
|
"""
=========================================
PCA 2d projection of of Iris dataset
=========================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
"""
import pylab as pl
from scikits.learn import datasets
from scikits.learn.pca import PCA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_comp=2)
X_r = pca.fit(X).transform(X)
# Percentage of variance explained for each components
print pca.explained_variance_
pl.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r[y==i,0], X_r[y==i,1], c=c, label=target_name)
pl.legend()
pl.title('PCA of IRIS dataset')
pl.show()
|
Remove non-necessary lines from PCA example
|
MISC: Remove non-necessary lines from PCA example
|
Python
|
bsd-3-clause
|
mikebenfield/scikit-learn,liyu1990/sklearn,YinongLong/scikit-learn,Titan-C/scikit-learn,xwolf12/scikit-learn,massmutual/scikit-learn,siutanwong/scikit-learn,tmhm/scikit-learn,Aasmi/scikit-learn,icdishb/scikit-learn,yunfeilu/scikit-learn,quheng/scikit-learn,vibhorag/scikit-learn,kaichogami/scikit-learn,cybernet14/scikit-learn,ssaeger/scikit-learn,ChanChiChoi/scikit-learn,mattilyra/scikit-learn,etkirsch/scikit-learn,treycausey/scikit-learn,ycaihua/scikit-learn,cl4rke/scikit-learn,toastedcornflakes/scikit-learn,elkingtonmcb/scikit-learn,MatthieuBizien/scikit-learn,NunoEdgarGub1/scikit-learn,roxyboy/scikit-learn,potash/scikit-learn,h2educ/scikit-learn,Aasmi/scikit-learn,kylerbrown/scikit-learn,khkaminska/scikit-learn,AlexandreAbraham/scikit-learn,0x0all/scikit-learn,jereze/scikit-learn,larsmans/scikit-learn,pompiduskus/scikit-learn,JPFrancoia/scikit-learn,pnedunuri/scikit-learn,lazywei/scikit-learn,BiaDarkia/scikit-learn,sergeyf/scikit-learn,ElDeveloper/scikit-learn,dingocuster/scikit-learn,djgagne/scikit-learn,alvarofierroclavero/scikit-learn,shangwuhencc/scikit-learn,simon-pepin/scikit-learn,tawsifkhan/scikit-learn,akionakamura/scikit-learn,Akshay0724/scikit-learn,maheshakya/scikit-learn,khkaminska/scikit-learn,ycaihua/scikit-learn,andrewnc/scikit-learn,rvraghav93/scikit-learn,petosegan/scikit-learn,ndingwall/scikit-learn,sumspr/scikit-learn,xubenben/scikit-learn,fabioticconi/scikit-learn,iismd17/scikit-learn,sinhrks/scikit-learn,pnedunuri/scikit-learn,xavierwu/scikit-learn,glennq/scikit-learn,fredhusser/scikit-learn,clemkoa/scikit-learn,davidgbe/scikit-learn,pv/scikit-learn,yyjiang/scikit-learn,depet/scikit-learn,ningchi/scikit-learn,PrashntS/scikit-learn,zihua/scikit-learn,wzbozon/scikit-learn,jzt5132/scikit-learn,marcocaccin/scikit-learn,jakirkham/scikit-learn,mxjl620/scikit-learn,xuewei4d/scikit-learn,liyu1990/sklearn,aflaxman/scikit-learn,rahul-c1/scikit-learn,cdegroc/scikit-learn,victorbergelin/scikit-learn,nmayorov/scikit-learn,IssamLaradji/scikit-learn,mayblue9/scikit-learn,bikong2/scikit-learn,mayblue9/scikit-learn,fbagirov/scikit-learn,fredhusser/scikit-learn,Garrett-R/scikit-learn,yask123/scikit-learn,marcocaccin/scikit-learn,glouppe/scikit-learn,equialgo/scikit-learn,dsullivan7/scikit-learn,gclenaghan/scikit-learn,Fireblend/scikit-learn,xyguo/scikit-learn,shikhardb/scikit-learn,gotomypc/scikit-learn,liangz0707/scikit-learn,yonglehou/scikit-learn,deepesch/scikit-learn,Sentient07/scikit-learn,DonBeo/scikit-learn,rishikksh20/scikit-learn,466152112/scikit-learn,CVML/scikit-learn,sgenoud/scikit-learn,anirudhjayaraman/scikit-learn,RPGOne/scikit-learn,appapantula/scikit-learn,ltiao/scikit-learn,zaxtax/scikit-learn,aminert/scikit-learn,rishikksh20/scikit-learn,LiaoPan/scikit-learn,eg-zhang/scikit-learn,nelson-liu/scikit-learn,cybernet14/scikit-learn,waterponey/scikit-learn,depet/scikit-learn,hlin117/scikit-learn,loli/sklearn-ensembletrees,jlegendary/scikit-learn,ZENGXH/scikit-learn,sumspr/scikit-learn,Clyde-fare/scikit-learn,Jimmy-Morzaria/scikit-learn,marcocaccin/scikit-learn,fengzhyuan/scikit-learn,shyamalschandra/scikit-learn,florian-f/sklearn,lin-credible/scikit-learn,anirudhjayaraman/scikit-learn,fyffyt/scikit-learn,ngoix/OCRF,Lawrence-Liu/scikit-learn,DonBeo/scikit-learn,mattilyra/scikit-learn,ZENGXH/scikit-learn,jm-begon/scikit-learn,LiaoPan/scikit-learn,vermouthmjl/scikit-learn,jorge2703/scikit-learn,smartscheduling/scikit-learn-categorical-tree,ZENGXH/scikit-learn,appapantula/scikit-learn,wlamond/scikit-learn,f3r/scikit-learn,equialgo/scikit-learn,davidgbe/scikit-learn,thilbern/s
cikit-learn,jakobworldpeace/scikit-learn,yask123/scikit-learn,roxyboy/scikit-learn,dsullivan7/scikit-learn,zorojean/scikit-learn,elkingtonmcb/scikit-learn,IssamLaradji/scikit-learn,robbymeals/scikit-learn,sgenoud/scikit-learn,AIML/scikit-learn,quheng/scikit-learn,ltiao/scikit-learn,Jimmy-Morzaria/scikit-learn,dingocuster/scikit-learn,untom/scikit-learn,zihua/scikit-learn,procoder317/scikit-learn,liberatorqjw/scikit-learn,CforED/Machine-Learning,jseabold/scikit-learn,stylianos-kampakis/scikit-learn,hlin117/scikit-learn,Djabbz/scikit-learn,spallavolu/scikit-learn,NunoEdgarGub1/scikit-learn,AnasGhrab/scikit-learn,qifeigit/scikit-learn,RPGOne/scikit-learn,466152112/scikit-learn,shenzebang/scikit-learn,Srisai85/scikit-learn,lenovor/scikit-learn,mfjb/scikit-learn,huobaowangxi/scikit-learn,aflaxman/scikit-learn,shusenl/scikit-learn,pnedunuri/scikit-learn,untom/scikit-learn,vybstat/scikit-learn,vibhorag/scikit-learn,glemaitre/scikit-learn,ogrisel/scikit-learn,chrsrds/scikit-learn,wanggang3333/scikit-learn,cauchycui/scikit-learn,ogrisel/scikit-learn,abimannans/scikit-learn,gotomypc/scikit-learn,yanlend/scikit-learn,ElDeveloper/scikit-learn,victorbergelin/scikit-learn,theoryno3/scikit-learn,jpautom/scikit-learn,jblackburne/scikit-learn,djgagne/scikit-learn,adamgreenhall/scikit-learn,lin-credible/scikit-learn,zuku1985/scikit-learn,hrjn/scikit-learn,idlead/scikit-learn,altairpearl/scikit-learn,meduz/scikit-learn,plissonf/scikit-learn,lucidfrontier45/scikit-learn,mikebenfield/scikit-learn,victorbergelin/scikit-learn,hlin117/scikit-learn,pkruskal/scikit-learn,kashif/scikit-learn,eg-zhang/scikit-learn,IssamLaradji/scikit-learn,0x0all/scikit-learn,ZenDevelopmentSystems/scikit-learn,untom/scikit-learn,JeanKossaifi/scikit-learn,Clyde-fare/scikit-learn,f3r/scikit-learn,TomDLT/scikit-learn,ZenDevelopmentSystems/scikit-learn,IshankGulati/scikit-learn,olologin/scikit-learn,frank-tancf/scikit-learn,beepee14/scikit-learn,rahul-c1/scikit-learn,xyguo/scikit-learn,waterponey/scikit-learn,yyjiang/scikit-learn,nvoron23/scikit-learn,ashhher3/scikit-learn,3manuek/scikit-learn,jorik041/scikit-learn,Titan-C/scikit-learn,ChanderG/scikit-learn,betatim/scikit-learn,bthirion/scikit-learn,carrillo/scikit-learn,vigilv/scikit-learn,spallavolu/scikit-learn,thilbern/scikit-learn,LiaoPan/scikit-learn,shahankhatch/scikit-learn,mhue/scikit-learn,ahoyosid/scikit-learn,PatrickOReilly/scikit-learn,bnaul/scikit-learn,wazeerzulfikar/scikit-learn,jm-begon/scikit-learn,murali-munna/scikit-learn,chrsrds/scikit-learn,toastedcornflakes/scikit-learn,mehdidc/scikit-learn,billy-inn/scikit-learn,anntzer/scikit-learn,pythonvietnam/scikit-learn,zaxtax/scikit-learn,jereze/scikit-learn,abimannans/scikit-learn,giorgiop/scikit-learn,DonBeo/scikit-learn,lucidfrontier45/scikit-learn,pypot/scikit-learn,etkirsch/scikit-learn,cainiaocome/scikit-learn,AlexRobson/scikit-learn,DSLituiev/scikit-learn,ephes/scikit-learn,sarahgrogan/scikit-learn,herilalaina/scikit-learn,Clyde-fare/scikit-learn,CforED/Machine-Learning,Vimos/scikit-learn,dhruv13J/scikit-learn,ilyes14/scikit-learn,hugobowne/scikit-learn,imaculate/scikit-learn,shenzebang/scikit-learn,ogrisel/scikit-learn,akionakamura/scikit-learn,Fireblend/scikit-learn,bthirion/scikit-learn,maheshakya/scikit-learn,UNR-AERIAL/scikit-learn,vortex-ape/scikit-learn,frank-tancf/scikit-learn,nhejazi/scikit-learn,yunfeilu/scikit-learn,vivekmishra1991/scikit-learn,liangz0707/scikit-learn,henrykironde/scikit-learn,jmschrei/scikit-learn,jkarnows/scikit-learn,hsiaoyi0504/scikit-learn,robin-lai/scikit-learn,glennq/scikit-learn,zax
tax/scikit-learn,xuewei4d/scikit-learn,jmetzen/scikit-learn,gotomypc/scikit-learn,jjx02230808/project0223,kmike/scikit-learn,hsiaoyi0504/scikit-learn,AIML/scikit-learn,kjung/scikit-learn,jereze/scikit-learn,yyjiang/scikit-learn,procoder317/scikit-learn,shangwuhencc/scikit-learn,belltailjp/scikit-learn,Jimmy-Morzaria/scikit-learn,schets/scikit-learn,stylianos-kampakis/scikit-learn,rsivapr/scikit-learn,samzhang111/scikit-learn,betatim/scikit-learn,ilo10/scikit-learn,Nyker510/scikit-learn,nelson-liu/scikit-learn,arjoly/scikit-learn,LohithBlaze/scikit-learn,ltiao/scikit-learn,themrmax/scikit-learn,h2educ/scikit-learn,billy-inn/scikit-learn,MatthieuBizien/scikit-learn,q1ang/scikit-learn,zorroblue/scikit-learn,MartinDelzant/scikit-learn,Garrett-R/scikit-learn,trungnt13/scikit-learn,espg/scikit-learn,jmschrei/scikit-learn,olologin/scikit-learn,smartscheduling/scikit-learn-categorical-tree,devanshdalal/scikit-learn,herilalaina/scikit-learn,robin-lai/scikit-learn,sarahgrogan/scikit-learn,shangwuhencc/scikit-learn,Akshay0724/scikit-learn,aabadie/scikit-learn,vermouthmjl/scikit-learn,mattgiguere/scikit-learn,jereze/scikit-learn,tosolveit/scikit-learn,deepesch/scikit-learn,andaag/scikit-learn,jblackburne/scikit-learn,ycaihua/scikit-learn,rexshihaoren/scikit-learn,IndraVikas/scikit-learn,RayMick/scikit-learn,Garrett-R/scikit-learn,shusenl/scikit-learn,wanggang3333/scikit-learn,jayflo/scikit-learn,thientu/scikit-learn,Sentient07/scikit-learn,ky822/scikit-learn,JPFrancoia/scikit-learn,djgagne/scikit-learn,vigilv/scikit-learn,billy-inn/scikit-learn,jkarnows/scikit-learn,nhejazi/scikit-learn,PrashntS/scikit-learn,arjoly/scikit-learn,kevin-intel/scikit-learn,manhhomienbienthuy/scikit-learn,ky822/scikit-learn,gclenaghan/scikit-learn,cainiaocome/scikit-learn,mjgrav2001/scikit-learn,toastedcornflakes/scikit-learn,jblackburne/scikit-learn,eickenberg/scikit-learn,xyguo/scikit-learn,moutai/scikit-learn,alvarofierroclavero/scikit-learn,jseabold/scikit-learn,nrhine1/scikit-learn,rrohan/scikit-learn,kmike/scikit-learn,nrhine1/scikit-learn,hdmetor/scikit-learn,PatrickOReilly/scikit-learn,larsmans/scikit-learn,trungnt13/scikit-learn,fzalkow/scikit-learn,loli/semisupervisedforests,plissonf/scikit-learn,arabenjamin/scikit-learn,potash/scikit-learn,ishanic/scikit-learn,adamgreenhall/scikit-learn,kashif/scikit-learn,jpautom/scikit-learn,joernhees/scikit-learn,poryfly/scikit-learn,ZENGXH/scikit-learn,mhue/scikit-learn,NelisVerhoef/scikit-learn,B3AU/waveTree,madjelan/scikit-learn,florian-f/sklearn,andaag/scikit-learn,Myasuka/scikit-learn,simon-pepin/scikit-learn,nhejazi/scikit-learn,raghavrv/scikit-learn,UNR-AERIAL/scikit-learn,cwu2011/scikit-learn,simon-pepin/scikit-learn,AlexRobson/scikit-learn,quheng/scikit-learn,cl4rke/scikit-learn,zorroblue/scikit-learn,dsullivan7/scikit-learn,ldirer/scikit-learn,meduz/scikit-learn,harshaneelhg/scikit-learn,beepee14/scikit-learn,raghavrv/scikit-learn,sumspr/scikit-learn,zhenv5/scikit-learn,zhenv5/scikit-learn,hainm/scikit-learn,jlegendary/scikit-learn,abhishekgahlot/scikit-learn,xiaoxiamii/scikit-learn,JPFrancoia/scikit-learn,ldirer/scikit-learn,cainiaocome/scikit-learn,mwv/scikit-learn,kevin-intel/scikit-learn,dsullivan7/scikit-learn,macks22/scikit-learn,mattgiguere/scikit-learn,sergeyf/scikit-learn,rajat1994/scikit-learn,meduz/scikit-learn,vinayak-mehta/scikit-learn,manashmndl/scikit-learn,abhishekgahlot/scikit-learn,ZenDevelopmentSystems/scikit-learn,aminert/scikit-learn,pythonvietnam/scikit-learn,zorojean/scikit-learn,r-mart/scikit-learn,zihua/scikit-learn,aminert/scikit-learn,jkarn
ows/scikit-learn,hlin117/scikit-learn,evgchz/scikit-learn,pypot/scikit-learn,Titan-C/scikit-learn,arabenjamin/scikit-learn,sgenoud/scikit-learn,ycaihua/scikit-learn,rexshihaoren/scikit-learn,fabianp/scikit-learn,khkaminska/scikit-learn,abhishekgahlot/scikit-learn,pratapvardhan/scikit-learn,trankmichael/scikit-learn,mblondel/scikit-learn,xuewei4d/scikit-learn,bigdataelephants/scikit-learn,aflaxman/scikit-learn,jjx02230808/project0223,rahul-c1/scikit-learn,yonglehou/scikit-learn,ilo10/scikit-learn,victorbergelin/scikit-learn,lbishal/scikit-learn,Windy-Ground/scikit-learn,wlamond/scikit-learn,siutanwong/scikit-learn,pv/scikit-learn,jjx02230808/project0223,AlexRobson/scikit-learn,davidgbe/scikit-learn,TomDLT/scikit-learn,rexshihaoren/scikit-learn,samzhang111/scikit-learn,Achuth17/scikit-learn,Nyker510/scikit-learn,beepee14/scikit-learn,xubenben/scikit-learn,poryfly/scikit-learn,466152112/scikit-learn,heli522/scikit-learn,rohanp/scikit-learn,RomainBrault/scikit-learn,tdhopper/scikit-learn,liberatorqjw/scikit-learn,treycausey/scikit-learn,arahuja/scikit-learn,scikit-learn/scikit-learn,appapantula/scikit-learn,wazeerzulfikar/scikit-learn,yanlend/scikit-learn,Obus/scikit-learn,bigdataelephants/scikit-learn,mojoboss/scikit-learn,zorojean/scikit-learn,jlegendary/scikit-learn,RPGOne/scikit-learn,joshloyal/scikit-learn,trankmichael/scikit-learn,f3r/scikit-learn,YinongLong/scikit-learn,themrmax/scikit-learn,ky822/scikit-learn,krez13/scikit-learn,huobaowangxi/scikit-learn,abimannans/scikit-learn,jpautom/scikit-learn,heli522/scikit-learn,jzt5132/scikit-learn,lesteve/scikit-learn,cwu2011/scikit-learn,mjgrav2001/scikit-learn,rahuldhote/scikit-learn,glouppe/scikit-learn,akionakamura/scikit-learn,henrykironde/scikit-learn,mhue/scikit-learn,3manuek/scikit-learn,ningchi/scikit-learn,loli/sklearn-ensembletrees,mugizico/scikit-learn,saiwing-yeung/scikit-learn,shikhardb/scikit-learn,shyamalschandra/scikit-learn,anurag313/scikit-learn,saiwing-yeung/scikit-learn,krez13/scikit-learn,dsquareindia/scikit-learn,aetilley/scikit-learn,RachitKansal/scikit-learn,bhargav/scikit-learn,elkingtonmcb/scikit-learn,altairpearl/scikit-learn,ChanChiChoi/scikit-learn,thientu/scikit-learn,justincassidy/scikit-learn,anntzer/scikit-learn,btabibian/scikit-learn,LohithBlaze/scikit-learn,xubenben/scikit-learn,mayblue9/scikit-learn,ashhher3/scikit-learn,tosolveit/scikit-learn,idlead/scikit-learn,jjx02230808/project0223,ldirer/scikit-learn,marcocaccin/scikit-learn,aetilley/scikit-learn,JosmanPS/scikit-learn,fengzhyuan/scikit-learn,Jimmy-Morzaria/scikit-learn,vinayak-mehta/scikit-learn,terkkila/scikit-learn,rahul-c1/scikit-learn,ishanic/scikit-learn,ishanic/scikit-learn,ankurankan/scikit-learn,equialgo/scikit-learn,hdmetor/scikit-learn,alexeyum/scikit-learn,hrjn/scikit-learn,murali-munna/scikit-learn,joernhees/scikit-learn,nomadcube/scikit-learn,poryfly/scikit-learn,h2educ/scikit-learn,mhdella/scikit-learn,zihua/scikit-learn,sgenoud/scikit-learn,luo66/scikit-learn,RayMick/scikit-learn,CVML/scikit-learn,IndraVikas/scikit-learn,potash/scikit-learn,tmhm/scikit-learn,shusenl/scikit-learn,cybernet14/scikit-learn,yunfeilu/scikit-learn,trankmichael/scikit-learn,hsiaoyi0504/scikit-learn,henrykironde/scikit-learn,nomadcube/scikit-learn,loli/semisupervisedforests,0asa/scikit-learn,lin-credible/scikit-learn,ChanderG/scikit-learn,eickenberg/scikit-learn,vermouthmjl/scikit-learn,smartscheduling/scikit-learn-categorical-tree,sanketloke/scikit-learn,shikhardb/scikit-learn,PrashntS/scikit-learn,samuel1208/scikit-learn,nikitasingh981/scikit-learn,qifeigit/sci
kit-learn,bigdataelephants/scikit-learn,MartinDelzant/scikit-learn,massmutual/scikit-learn,anirudhjayaraman/scikit-learn,rrohan/scikit-learn,zorroblue/scikit-learn,justincassidy/scikit-learn,shenzebang/scikit-learn,madjelan/scikit-learn,yunfeilu/scikit-learn,kaichogami/scikit-learn,nomadcube/scikit-learn,wlamond/scikit-learn,ngoix/OCRF,PatrickOReilly/scikit-learn,CforED/Machine-Learning,lbishal/scikit-learn,JPFrancoia/scikit-learn,CVML/scikit-learn,herilalaina/scikit-learn,PrashntS/scikit-learn,sonnyhu/scikit-learn,krez13/scikit-learn,alexsavio/scikit-learn,mjgrav2001/scikit-learn,tdhopper/scikit-learn,loli/sklearn-ensembletrees,vortex-ape/scikit-learn,khkaminska/scikit-learn,fabianp/scikit-learn,stylianos-kampakis/scikit-learn,olologin/scikit-learn,cauchycui/scikit-learn,mrshu/scikit-learn,glouppe/scikit-learn,aflaxman/scikit-learn,siutanwong/scikit-learn,fengzhyuan/scikit-learn,roxyboy/scikit-learn,Srisai85/scikit-learn,RachitKansal/scikit-learn,pianomania/scikit-learn,JosmanPS/scikit-learn,glouppe/scikit-learn,mblondel/scikit-learn,fbagirov/scikit-learn,kmike/scikit-learn,nomadcube/scikit-learn,petosegan/scikit-learn,mwv/scikit-learn,BiaDarkia/scikit-learn,xavierwu/scikit-learn,adamgreenhall/scikit-learn,Garrett-R/scikit-learn,adamgreenhall/scikit-learn,466152112/scikit-learn,RayMick/scikit-learn,jayflo/scikit-learn,PatrickOReilly/scikit-learn,cdegroc/scikit-learn,smartscheduling/scikit-learn-categorical-tree,amueller/scikit-learn,kagayakidan/scikit-learn,pianomania/scikit-learn,pompiduskus/scikit-learn,nesterione/scikit-learn,sergeyf/scikit-learn,clemkoa/scikit-learn,altairpearl/scikit-learn,Achuth17/scikit-learn,bthirion/scikit-learn,theoryno3/scikit-learn,Obus/scikit-learn,vermouthmjl/scikit-learn,dingocuster/scikit-learn,Fireblend/scikit-learn,arahuja/scikit-learn,AlexanderFabisch/scikit-learn,mugizico/scikit-learn,jorge2703/scikit-learn,mwv/scikit-learn,theoryno3/scikit-learn,robin-lai/scikit-learn,chrsrds/scikit-learn,NunoEdgarGub1/scikit-learn,zhenv5/scikit-learn,IshankGulati/scikit-learn,etkirsch/scikit-learn,alexeyum/scikit-learn,untom/scikit-learn,HolgerPeters/scikit-learn,B3AU/waveTree,rahuldhote/scikit-learn,schets/scikit-learn,nhejazi/scikit-learn,zuku1985/scikit-learn,vybstat/scikit-learn,r-mart/scikit-learn,tomlof/scikit-learn,ndingwall/scikit-learn,hitszxp/scikit-learn,luo66/scikit-learn,RomainBrault/scikit-learn,joernhees/scikit-learn,costypetrisor/scikit-learn,q1ang/scikit-learn,abhishekgahlot/scikit-learn,eickenberg/scikit-learn,btabibian/scikit-learn,B3AU/waveTree,qifeigit/scikit-learn,pianomania/scikit-learn,cdegroc/scikit-learn,kagayakidan/scikit-learn,aetilley/scikit-learn,rrohan/scikit-learn,samuel1208/scikit-learn,chrisburr/scikit-learn,ssaeger/scikit-learn,AlexandreAbraham/scikit-learn,tosolveit/scikit-learn,tawsifkhan/scikit-learn,liyu1990/sklearn,poryfly/scikit-learn,eg-zhang/scikit-learn,imaculate/scikit-learn,Barmaley-exe/scikit-learn,ivannz/scikit-learn,icdishb/scikit-learn,abimannans/scikit-learn,eickenberg/scikit-learn,kaichogami/scikit-learn,ashhher3/scikit-learn,rsivapr/scikit-learn,B3AU/waveTree,MartinDelzant/scikit-learn,belltailjp/scikit-learn,robbymeals/scikit-learn,tomlof/scikit-learn,eg-zhang/scikit-learn,lin-credible/scikit-learn,mhdella/scikit-learn,cauchycui/scikit-learn,phdowling/scikit-learn,jorik041/scikit-learn,fredhusser/scikit-learn,walterreade/scikit-learn,kaichogami/scikit-learn,plissonf/scikit-learn,ankurankan/scikit-learn,RomainBrault/scikit-learn,xuewei4d/scikit-learn,HolgerPeters/scikit-learn,theoryno3/scikit-learn,ChanChiChoi/sciki
t-learn,joernhees/scikit-learn,jakobworldpeace/scikit-learn,mlyundin/scikit-learn,ephes/scikit-learn,mayblue9/scikit-learn,mikebenfield/scikit-learn,evgchz/scikit-learn,robin-lai/scikit-learn,ycaihua/scikit-learn,vivekmishra1991/scikit-learn,IndraVikas/scikit-learn,jayflo/scikit-learn,liangz0707/scikit-learn,tmhm/scikit-learn,vybstat/scikit-learn,clemkoa/scikit-learn,tmhm/scikit-learn,ClimbsRocks/scikit-learn,jaidevd/scikit-learn,mblondel/scikit-learn,maheshakya/scikit-learn,meduz/scikit-learn,Vimos/scikit-learn,Aasmi/scikit-learn,xubenben/scikit-learn,TomDLT/scikit-learn,murali-munna/scikit-learn,ElDeveloper/scikit-learn,justincassidy/scikit-learn,moutai/scikit-learn,lbishal/scikit-learn,mugizico/scikit-learn,Djabbz/scikit-learn,frank-tancf/scikit-learn,vortex-ape/scikit-learn,AlexanderFabisch/scikit-learn,NelisVerhoef/scikit-learn,kmike/scikit-learn,jzt5132/scikit-learn,Windy-Ground/scikit-learn,B3AU/waveTree,q1ang/scikit-learn,Nyker510/scikit-learn,michigraber/scikit-learn,ChanderG/scikit-learn,shikhardb/scikit-learn,qifeigit/scikit-learn,pianomania/scikit-learn,yonglehou/scikit-learn,mjgrav2001/scikit-learn,rsivapr/scikit-learn,loli/sklearn-ensembletrees,jakirkham/scikit-learn,Sentient07/scikit-learn,dhruv13J/scikit-learn,ngoix/OCRF,mattgiguere/scikit-learn,liyu1990/sklearn,henridwyer/scikit-learn,AnasGhrab/scikit-learn,hrjn/scikit-learn,pypot/scikit-learn,deepesch/scikit-learn,vshtanko/scikit-learn,mojoboss/scikit-learn,kmike/scikit-learn,Obus/scikit-learn,jaidevd/scikit-learn,trungnt13/scikit-learn,loli/semisupervisedforests,phdowling/scikit-learn,RPGOne/scikit-learn,andrewnc/scikit-learn,aabadie/scikit-learn,ClimbsRocks/scikit-learn,henridwyer/scikit-learn,anurag313/scikit-learn,jaidevd/scikit-learn,espg/scikit-learn,lucidfrontier45/scikit-learn,samzhang111/scikit-learn,lenovor/scikit-learn,heli522/scikit-learn,spallavolu/scikit-learn,0x0all/scikit-learn,alexeyum/scikit-learn,sarahgrogan/scikit-learn,jmetzen/scikit-learn,fredhusser/scikit-learn,madjelan/scikit-learn,IndraVikas/scikit-learn,LohithBlaze/scikit-learn,thilbern/scikit-learn,djgagne/scikit-learn,mattgiguere/scikit-learn,frank-tancf/scikit-learn,quheng/scikit-learn,massmutual/scikit-learn,larsmans/scikit-learn,gotomypc/scikit-learn,PatrickChrist/scikit-learn,arjoly/scikit-learn,ivannz/scikit-learn,Adai0808/scikit-learn,betatim/scikit-learn,bhargav/scikit-learn,evgchz/scikit-learn,ElDeveloper/scikit-learn,ephes/scikit-learn,manhhomienbienthuy/scikit-learn,Nyker510/scikit-learn,AlexandreAbraham/scikit-learn,jlegendary/scikit-learn,etkirsch/scikit-learn,cainiaocome/scikit-learn,trankmichael/scikit-learn,DonBeo/scikit-learn,petosegan/scikit-learn,ilyes14/scikit-learn,xzh86/scikit-learn,pv/scikit-learn,wanggang3333/scikit-learn,AlexanderFabisch/scikit-learn,xiaoxiamii/scikit-learn,glennq/scikit-learn,yonglehou/scikit-learn,PatrickChrist/scikit-learn,arahuja/scikit-learn,ankurankan/scikit-learn,ominux/scikit-learn,treycausey/scikit-learn,bnaul/scikit-learn,walterreade/scikit-learn,0asa/scikit-learn,ClimbsRocks/scikit-learn,espg/scikit-learn,arahuja/scikit-learn,fengzhyuan/scikit-learn,krez13/scikit-learn,billy-inn/scikit-learn,iismd17/scikit-learn,costypetrisor/scikit-learn,shahankhatch/scikit-learn,voxlol/scikit-learn,sinhrks/scikit-learn,wazeerzulfikar/scikit-learn,MartinSavc/scikit-learn,andaag/scikit-learn,alvarofierroclavero/scikit-learn,Clyde-fare/scikit-learn,jakobworldpeace/scikit-learn,MohammedWasim/scikit-learn,r-mart/scikit-learn,andaag/scikit-learn,nmayorov/scikit-learn,henridwyer/scikit-learn,amueller/scikit-learn,wz
bozon/scikit-learn,MartinSavc/scikit-learn,jmschrei/scikit-learn,idlead/scikit-learn,xiaoxiamii/scikit-learn,russel1237/scikit-learn,vigilv/scikit-learn,btabibian/scikit-learn,aminert/scikit-learn,henridwyer/scikit-learn,jayflo/scikit-learn,anntzer/scikit-learn,bikong2/scikit-learn,pnedunuri/scikit-learn,mojoboss/scikit-learn,procoder317/scikit-learn,fabioticconi/scikit-learn,kevin-intel/scikit-learn,ZenDevelopmentSystems/scikit-learn,alexsavio/scikit-learn,0x0all/scikit-learn,Myasuka/scikit-learn,zuku1985/scikit-learn,HolgerPeters/scikit-learn,OshynSong/scikit-learn,NunoEdgarGub1/scikit-learn,glemaitre/scikit-learn,madjelan/scikit-learn,pkruskal/scikit-learn,cauchycui/scikit-learn,hitszxp/scikit-learn,ky822/scikit-learn,mhdella/scikit-learn,q1ang/scikit-learn,florian-f/sklearn,fbagirov/scikit-learn,moutai/scikit-learn,andrewnc/scikit-learn,gclenaghan/scikit-learn,hsuantien/scikit-learn,0asa/scikit-learn,liangz0707/scikit-learn,toastedcornflakes/scikit-learn,waterponey/scikit-learn,MartinDelzant/scikit-learn,harshaneelhg/scikit-learn,nvoron23/scikit-learn,ominux/scikit-learn,0asa/scikit-learn,iismd17/scikit-learn,jorik041/scikit-learn,mrshu/scikit-learn,jmetzen/scikit-learn,jorik041/scikit-learn,rsivapr/scikit-learn,themrmax/scikit-learn,JeanKossaifi/scikit-learn,chrisburr/scikit-learn,bigdataelephants/scikit-learn,xavierwu/scikit-learn,terkkila/scikit-learn,anurag313/scikit-learn,pkruskal/scikit-learn,florian-f/sklearn,ssaeger/scikit-learn,PatrickChrist/scikit-learn,yask123/scikit-learn,russel1237/scikit-learn,petosegan/scikit-learn,ashhher3/scikit-learn,mjudsp/Tsallis,themrmax/scikit-learn,imaculate/scikit-learn,lenovor/scikit-learn,h2educ/scikit-learn,AlexanderFabisch/scikit-learn,ningchi/scikit-learn,herilalaina/scikit-learn,bnaul/scikit-learn,samuel1208/scikit-learn,fbagirov/scikit-learn,rexshihaoren/scikit-learn,kashif/scikit-learn,MechCoder/scikit-learn,phdowling/scikit-learn,bhargav/scikit-learn,alexsavio/scikit-learn,massmutual/scikit-learn,yanlend/scikit-learn,cybernet14/scikit-learn,kjung/scikit-learn,abhishekgahlot/scikit-learn,carrillo/scikit-learn,Barmaley-exe/scikit-learn,huzq/scikit-learn,ChanChiChoi/scikit-learn,MechCoder/scikit-learn,hdmetor/scikit-learn,AIML/scikit-learn,sanketloke/scikit-learn,vigilv/scikit-learn,ivannz/scikit-learn,huobaowangxi/scikit-learn,shyamalschandra/scikit-learn,ltiao/scikit-learn,abhishekkrthakur/scikit-learn,icdishb/scikit-learn,BiaDarkia/scikit-learn,mxjl620/scikit-learn,AlexRobson/scikit-learn,Lawrence-Liu/scikit-learn,shangwuhencc/scikit-learn,giorgiop/scikit-learn,Adai0808/scikit-learn,lbishal/scikit-learn,RayMick/scikit-learn,tomlof/scikit-learn,ssaeger/scikit-learn,michigraber/scikit-learn,lucidfrontier45/scikit-learn,hainm/scikit-learn,LohithBlaze/scikit-learn,vibhorag/scikit-learn,aabadie/scikit-learn,jakirkham/scikit-learn,vinayak-mehta/scikit-learn,tomlof/scikit-learn,cwu2011/scikit-learn,sgenoud/scikit-learn,Vimos/scikit-learn,mjudsp/Tsallis,ngoix/OCRF,fyffyt/scikit-learn,hugobowne/scikit-learn,IssamLaradji/scikit-learn,mfjb/scikit-learn,BiaDarkia/scikit-learn,larsmans/scikit-learn,mxjl620/scikit-learn,harshaneelhg/scikit-learn,glemaitre/scikit-learn,treycausey/scikit-learn,tosolveit/scikit-learn,mojoboss/scikit-learn,OshynSong/scikit-learn,Adai0808/scikit-learn,Aasmi/scikit-learn,YinongLong/scikit-learn,Windy-Ground/scikit-learn,equialgo/scikit-learn,Achuth17/scikit-learn,pythonvietnam/scikit-learn,liberatorqjw/scikit-learn,shahankhatch/scikit-learn,sarahgrogan/scikit-learn,zuku1985/scikit-learn,walterreade/scikit-learn,manashmndl/s
cikit-learn,manhhomienbienthuy/scikit-learn,ankurankan/scikit-learn,ankurankan/scikit-learn,nelson-liu/scikit-learn,aewhatley/scikit-learn,jblackburne/scikit-learn,rajat1994/scikit-learn,plissonf/scikit-learn,tdhopper/scikit-learn,Vimos/scikit-learn,giorgiop/scikit-learn,bnaul/scikit-learn,hitszxp/scikit-learn,vortex-ape/scikit-learn,lazywei/scikit-learn,bikong2/scikit-learn,jakirkham/scikit-learn,voxlol/scikit-learn,amueller/scikit-learn,PatrickChrist/scikit-learn,yask123/scikit-learn,lenovor/scikit-learn,Lawrence-Liu/scikit-learn,cdegroc/scikit-learn,jaidevd/scikit-learn,lesteve/scikit-learn,ngoix/OCRF,russel1237/scikit-learn,davidgbe/scikit-learn,MohammedWasim/scikit-learn,mattilyra/scikit-learn,kjung/scikit-learn,nrhine1/scikit-learn,rohanp/scikit-learn,r-mart/scikit-learn,macks22/scikit-learn,Adai0808/scikit-learn,rsivapr/scikit-learn,manashmndl/scikit-learn,ndingwall/scikit-learn,sonnyhu/scikit-learn,fabioticconi/scikit-learn,OshynSong/scikit-learn,clemkoa/scikit-learn,nmayorov/scikit-learn,AnasGhrab/scikit-learn,AIML/scikit-learn,DSLituiev/scikit-learn,hsuantien/scikit-learn,nikitasingh981/scikit-learn,abhishekkrthakur/scikit-learn,vibhorag/scikit-learn,vybstat/scikit-learn,potash/scikit-learn,scikit-learn/scikit-learn,mblondel/scikit-learn,depet/scikit-learn,ishanic/scikit-learn,aewhatley/scikit-learn,aabadie/scikit-learn,jakobworldpeace/scikit-learn,cl4rke/scikit-learn,tdhopper/scikit-learn,maheshakya/scikit-learn,ldirer/scikit-learn,treycausey/scikit-learn,murali-munna/scikit-learn,bikong2/scikit-learn,larsmans/scikit-learn,wlamond/scikit-learn,fzalkow/scikit-learn,pv/scikit-learn,MechCoder/scikit-learn,evgchz/scikit-learn,mwv/scikit-learn,macks22/scikit-learn,devanshdalal/scikit-learn,terkkila/scikit-learn,wazeerzulfikar/scikit-learn,michigraber/scikit-learn,samzhang111/scikit-learn,mrshu/scikit-learn,anurag313/scikit-learn,belltailjp/scikit-learn,hsuantien/scikit-learn,Barmaley-exe/scikit-learn,AlexandreAbraham/scikit-learn,huzq/scikit-learn,devanshdalal/scikit-learn,shenzebang/scikit-learn,ClimbsRocks/scikit-learn,fzalkow/scikit-learn,mxjl620/scikit-learn,mrshu/scikit-learn,olologin/scikit-learn,DSLituiev/scikit-learn,kjung/scikit-learn,xyguo/scikit-learn,fabioticconi/scikit-learn,dsquareindia/scikit-learn,CforED/Machine-Learning,schets/scikit-learn,akionakamura/scikit-learn,jzt5132/scikit-learn,dhruv13J/scikit-learn,IshankGulati/scikit-learn,joshloyal/scikit-learn,Obus/scikit-learn,sonnyhu/scikit-learn,manhhomienbienthuy/scikit-learn,sinhrks/scikit-learn,nikitasingh981/scikit-learn,depet/scikit-learn,aetilley/scikit-learn,ndingwall/scikit-learn,russel1237/scikit-learn,xzh86/scikit-learn,loli/semisupervisedforests,vivekmishra1991/scikit-learn,mlyundin/scikit-learn,fyffyt/scikit-learn,terkkila/scikit-learn,sergeyf/scikit-learn,espg/scikit-learn,Sentient07/scikit-learn,lesteve/scikit-learn,zorroblue/scikit-learn,robbymeals/scikit-learn,Windy-Ground/scikit-learn,mehdidc/scikit-learn,JeanKossaifi/scikit-learn,wzbozon/scikit-learn,mehdidc/scikit-learn,Akshay0724/scikit-learn,nvoron23/scikit-learn,fabianp/scikit-learn,LiaoPan/scikit-learn,simon-pepin/scikit-learn,xavierwu/scikit-learn,rajat1994/scikit-learn,xzh86/scikit-learn,dsquareindia/scikit-learn,dhruv13J/scikit-learn,fzalkow/scikit-learn,rajat1994/scikit-learn,samuel1208/scikit-learn,bhargav/scikit-learn,rvraghav93/scikit-learn,lazywei/scikit-learn,yyjiang/scikit-learn,devanshdalal/scikit-learn,Myasuka/scikit-learn,eickenberg/scikit-learn,HolgerPeters/scikit-learn,ningchi/scikit-learn,mrshu/scikit-learn,jmetzen/scikit-learn,har
shaneelhg/scikit-learn,alexeyum/scikit-learn,sanketloke/scikit-learn,alvarofierroclavero/scikit-learn,arabenjamin/scikit-learn,Srisai85/scikit-learn,wanggang3333/scikit-learn,amueller/scikit-learn,kashif/scikit-learn,mlyundin/scikit-learn,tawsifkhan/scikit-learn,shyamalschandra/scikit-learn,JsNoNo/scikit-learn,Lawrence-Liu/scikit-learn,btabibian/scikit-learn,phdowling/scikit-learn,raghavrv/scikit-learn,nrhine1/scikit-learn,rahuldhote/scikit-learn,rohanp/scikit-learn,appapantula/scikit-learn,kevin-intel/scikit-learn,hugobowne/scikit-learn,JsNoNo/scikit-learn,jm-begon/scikit-learn,nikitasingh981/scikit-learn,0asa/scikit-learn,altairpearl/scikit-learn,MohammedWasim/scikit-learn,aewhatley/scikit-learn,pompiduskus/scikit-learn,shusenl/scikit-learn,thientu/scikit-learn,vivekmishra1991/scikit-learn,Achuth17/scikit-learn,JsNoNo/scikit-learn,pypot/scikit-learn,MohammedWasim/scikit-learn,glennq/scikit-learn,JosmanPS/scikit-learn,kylerbrown/scikit-learn,ChanderG/scikit-learn,shahankhatch/scikit-learn,zorojean/scikit-learn,sinhrks/scikit-learn,hainm/scikit-learn,cl4rke/scikit-learn,ivannz/scikit-learn,mattilyra/scikit-learn,siutanwong/scikit-learn,depet/scikit-learn,ilo10/scikit-learn,rrohan/scikit-learn,liberatorqjw/scikit-learn,nmayorov/scikit-learn,Djabbz/scikit-learn,abhishekkrthakur/scikit-learn,pratapvardhan/scikit-learn,Srisai85/scikit-learn,cwu2011/scikit-learn,vinayak-mehta/scikit-learn,UNR-AERIAL/scikit-learn,scikit-learn/scikit-learn,mjudsp/Tsallis,joshloyal/scikit-learn,mugizico/scikit-learn,fyffyt/scikit-learn,jorge2703/scikit-learn,chrsrds/scikit-learn,f3r/scikit-learn,TomDLT/scikit-learn,manashmndl/scikit-learn,mfjb/scikit-learn,henrykironde/scikit-learn,anntzer/scikit-learn,xwolf12/scikit-learn,robbymeals/scikit-learn,vshtanko/scikit-learn,Garrett-R/scikit-learn,jseabold/scikit-learn,ngoix/OCRF,mlyundin/scikit-learn,arabenjamin/scikit-learn,pratapvardhan/scikit-learn,xwolf12/scikit-learn,waterponey/scikit-learn,hrjn/scikit-learn,maheshakya/scikit-learn,jseabold/scikit-learn,pkruskal/scikit-learn,ilyes14/scikit-learn,OshynSong/scikit-learn,mikebenfield/scikit-learn,costypetrisor/scikit-learn,MartinSavc/scikit-learn,IshankGulati/scikit-learn,abhishekkrthakur/scikit-learn,JsNoNo/scikit-learn,CVML/scikit-learn,Myasuka/scikit-learn,hitszxp/scikit-learn,mehdidc/scikit-learn,rvraghav93/scikit-learn,roxyboy/scikit-learn,xzh86/scikit-learn,nesterione/scikit-learn,NelisVerhoef/scikit-learn,Djabbz/scikit-learn,mjudsp/Tsallis,nesterione/scikit-learn,Akshay0724/scikit-learn,gclenaghan/scikit-learn,giorgiop/scikit-learn,ahoyosid/scikit-learn,beepee14/scikit-learn,michigraber/scikit-learn,mjudsp/Tsallis,dsquareindia/scikit-learn,procoder317/scikit-learn,chrisburr/scikit-learn,ominux/scikit-learn,moutai/scikit-learn,bthirion/scikit-learn,deepesch/scikit-learn,carrillo/scikit-learn,joshloyal/scikit-learn,ominux/scikit-learn,costypetrisor/scikit-learn,sonnyhu/scikit-learn,rahuldhote/scikit-learn,pompiduskus/scikit-learn,loli/sklearn-ensembletrees,zhenv5/scikit-learn,MechCoder/scikit-learn,jpautom/scikit-learn,saiwing-yeung/scikit-learn,ogrisel/scikit-learn,florian-f/sklearn,lesteve/scikit-learn,rishikksh20/scikit-learn,iismd17/scikit-learn,pythonvietnam/scikit-learn,idlead/scikit-learn,RachitKansal/scikit-learn,hainm/scikit-learn,hdmetor/scikit-learn,spallavolu/scikit-learn,RomainBrault/scikit-learn,luo66/scikit-learn,hugobowne/scikit-learn,hitszxp/scikit-learn,jm-begon/scikit-learn,yanlend/scikit-learn,justincassidy/scikit-learn,lazywei/scikit-learn,fabianp/scikit-learn,wzbozon/scikit-learn,mfjb/scikit-
learn,ahoyosid/scikit-learn,ilo10/scikit-learn,sanketloke/scikit-learn,trungnt13/scikit-learn,Barmaley-exe/scikit-learn,icdishb/scikit-learn,luo66/scikit-learn,0x0all/scikit-learn,DSLituiev/scikit-learn,thientu/scikit-learn,kylerbrown/scikit-learn,huobaowangxi/scikit-learn,hsuantien/scikit-learn,imaculate/scikit-learn,ahoyosid/scikit-learn,3manuek/scikit-learn,MatthieuBizien/scikit-learn,kagayakidan/scikit-learn,ilyes14/scikit-learn,thilbern/scikit-learn,glemaitre/scikit-learn,voxlol/scikit-learn,RachitKansal/scikit-learn,saiwing-yeung/scikit-learn,jmschrei/scikit-learn,arjoly/scikit-learn,raghavrv/scikit-learn,Fireblend/scikit-learn,stylianos-kampakis/scikit-learn,schets/scikit-learn,ephes/scikit-learn,alexsavio/scikit-learn,jorge2703/scikit-learn,huzq/scikit-learn,3manuek/scikit-learn,huzq/scikit-learn,dingocuster/scikit-learn,walterreade/scikit-learn,zaxtax/scikit-learn,sumspr/scikit-learn,kylerbrown/scikit-learn,macks22/scikit-learn,MatthieuBizien/scikit-learn,tawsifkhan/scikit-learn,aewhatley/scikit-learn,rvraghav93/scikit-learn,scikit-learn/scikit-learn,vshtanko/scikit-learn,AnasGhrab/scikit-learn,xwolf12/scikit-learn,JeanKossaifi/scikit-learn,vshtanko/scikit-learn,hsiaoyi0504/scikit-learn,rishikksh20/scikit-learn,mhdella/scikit-learn,evgchz/scikit-learn,jkarnows/scikit-learn,carrillo/scikit-learn,JosmanPS/scikit-learn,voxlol/scikit-learn,nelson-liu/scikit-learn,Titan-C/scikit-learn,xiaoxiamii/scikit-learn,kagayakidan/scikit-learn,UNR-AERIAL/scikit-learn,pratapvardhan/scikit-learn,YinongLong/scikit-learn,rohanp/scikit-learn,anirudhjayaraman/scikit-learn,andrewnc/scikit-learn,MartinSavc/scikit-learn,elkingtonmcb/scikit-learn,NelisVerhoef/scikit-learn,mhue/scikit-learn,heli522/scikit-learn,nvoron23/scikit-learn,betatim/scikit-learn,nesterione/scikit-learn,lucidfrontier45/scikit-learn,mattilyra/scikit-learn,belltailjp/scikit-learn,chrisburr/scikit-learn
|
"""
=========================================
PCA 2d projection of Iris dataset
=========================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
"""
import pylab as pl
from scikits.learn import datasets
from scikits.learn.pca import PCA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_comp=2)
X_r = pca.fit(X).transform(X)
# Percentage of variance explained for each component
print pca.explained_variance_
pl.figure()
pl.hold('on')
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r[y==i,0], X_r[y==i,1], c=c, label=target_name)
pl.hold('off')
pl.legend()
pl.title('PCA of IRIS dataset')
pl.show()
MISC: Remove non-necessary lines from PCA example
|
"""
=========================================
PCA 2d projection of Iris dataset
=========================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
"""
import pylab as pl
from scikits.learn import datasets
from scikits.learn.pca import PCA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_comp=2)
X_r = pca.fit(X).transform(X)
# Percentage of variance explained for each component
print pca.explained_variance_
pl.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r[y==i,0], X_r[y==i,1], c=c, label=target_name)
pl.legend()
pl.title('PCA of IRIS dataset')
pl.show()
|
<commit_before>"""
=========================================
PCA 2d projection of Iris dataset
=========================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
"""
import pylab as pl
from scikits.learn import datasets
from scikits.learn.pca import PCA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_comp=2)
X_r = pca.fit(X).transform(X)
# Percentage of variance explained for each component
print pca.explained_variance_
pl.figure()
pl.hold('on')
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r[y==i,0], X_r[y==i,1], c=c, label=target_name)
pl.hold('off')
pl.legend()
pl.title('PCA of IRIS dataset')
pl.show()
<commit_msg>MISC: Remove non-necessary lines from PCA example<commit_after>
|
"""
=========================================
PCA 2d projection of Iris dataset
=========================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
"""
import pylab as pl
from scikits.learn import datasets
from scikits.learn.pca import PCA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_comp=2)
X_r = pca.fit(X).transform(X)
# Percentage of variance explained for each component
print pca.explained_variance_
pl.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r[y==i,0], X_r[y==i,1], c=c, label=target_name)
pl.legend()
pl.title('PCA of IRIS dataset')
pl.show()
|
"""
=========================================
PCA 2d projection of Iris dataset
=========================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
"""
import pylab as pl
from scikits.learn import datasets
from scikits.learn.pca import PCA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_comp=2)
X_r = pca.fit(X).transform(X)
# Percentage of variance explained for each component
print pca.explained_variance_
pl.figure()
pl.hold('on')
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r[y==i,0], X_r[y==i,1], c=c, label=target_name)
pl.hold('off')
pl.legend()
pl.title('PCA of IRIS dataset')
pl.show()
MISC: Remove non-necessary lines from PCA example"""
=========================================
PCA 2d projection of Iris dataset
=========================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
"""
import pylab as pl
from scikits.learn import datasets
from scikits.learn.pca import PCA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_comp=2)
X_r = pca.fit(X).transform(X)
# Percentage of variance explained for each component
print pca.explained_variance_
pl.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r[y==i,0], X_r[y==i,1], c=c, label=target_name)
pl.legend()
pl.title('PCA of IRIS dataset')
pl.show()
|
<commit_before>"""
=========================================
PCA 2d projection of Iris dataset
=========================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
"""
import pylab as pl
from scikits.learn import datasets
from scikits.learn.pca import PCA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_comp=2)
X_r = pca.fit(X).transform(X)
# Percentage of variance explained for each component
print pca.explained_variance_
pl.figure()
pl.hold('on')
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r[y==i,0], X_r[y==i,1], c=c, label=target_name)
pl.hold('off')
pl.legend()
pl.title('PCA of IRIS dataset')
pl.show()
<commit_msg>MISC: Remove non-necessary lines from PCA example<commit_after>"""
=========================================
PCA 2d projection of Iris dataset
=========================================
The Iris dataset represents 3 kinds of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
"""
import pylab as pl
from scikits.learn import datasets
from scikits.learn.pca import PCA
iris = datasets.load_iris()
X = iris.data
y = iris.target
target_names = iris.target_names
pca = PCA(n_comp=2)
X_r = pca.fit(X).transform(X)
# Percentage of variance explained for each component
print pca.explained_variance_
pl.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
pl.scatter(X_r[y==i,0], X_r[y==i,1], c=c, label=target_name)
pl.legend()
pl.title('PCA of IRIS dataset')
pl.show()
|
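The PCA example in the record above targets the long-retired scikits.learn package layout: PCA(n_comp=2) uses an argument that was later renamed to n_components, print is the Python 2 statement form, and pl.hold() has since been removed from matplotlib. For readers who want to reproduce the figure today, a minimal sketch against the current scikit-learn API might look as follows (an approximate modernisation, not taken from the original commit):

import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.decomposition import PCA

iris = datasets.load_iris()
X, y, target_names = iris.data, iris.target, iris.target_names

pca = PCA(n_components=2)  # the old `n_comp` keyword became `n_components`
X_r = pca.fit(X).transform(X)

# Fraction of the total variance carried by each of the two components
print(pca.explained_variance_ratio_)

plt.figure()
for color, i, target_name in zip("rgb", [0, 1, 2], target_names):
    plt.scatter(X_r[y == i, 0], X_r[y == i, 1], c=color, label=target_name)
plt.legend()
plt.title("PCA of IRIS dataset")
plt.show()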
d85d1edc769a1d80f8cb4b99824a9277beff4c4c
|
python/sliding_window.py
|
python/sliding_window.py
|
'''
This script shows you how to use a sliding window
'''
from itertools import islice
def sliding_window(a, n, step):
'''
a - sequence
n - width of the window
step - window step
'''
z = (islice(a, i, None, step) for i in range(n))
return zip(*z)
##Example
sliding_window(range(10), 2, 1)
|
Add a script to do the sliding window efficiently
|
Add a script to do the sliding window efficiently
|
Python
|
bsd-3-clause
|
qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script
|
Add a script to do the sliding window efficiently
|
'''
This script shows you how to use a sliding window
'''
from itertools import islice
def sliding_window(a, n, step):
'''
a - sequence
n - width of the window
step - window step
'''
z = (islice(a, i, None, step) for i in range(n))
return zip(*z)
##Example
sliding_window(range(10), 2, 1)
|
<commit_before><commit_msg>Add a script to do the sliding window efficiently<commit_after>
|
'''
This script shows you how to use a sliding window
'''
from itertools import islice
def sliding_window(a, n, step):
'''
a - sequence
n - width of the window
step - window step
'''
z = (islice(a, i, None, step) for i in range(n))
return zip(*z)
##Example
sliding_window(range(10), 2, 1)
|
Add a script to do the sliding window efficiently'''
This script shows you how to use a sliding window
'''
from itertools import islice
def sliding_window(a, n, step):
'''
a - sequence
n - width of the window
step - window step
'''
z = (islice(a, i, None, step) for i in range(n))
return zip(*z)
##Example
sliding_window(range(10), 2, 1)
|
<commit_before><commit_msg>Add a script to do the sliding window efficiently<commit_after>'''
This script shows you how to use a sliding window
'''
from itertools import islice
def sliding_window(a, n, step):
'''
a - sequence
n - width of the window
step - window step
'''
z = (islice(a, i, None, step) for i in range(n))
return zip(*z)
##Example
sliding_window(range(10), 2, 1)
|
|
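The sliding_window helper in the record above is compact enough that its behaviour is easy to misread, so here is what it returns for two sample calls, worked out from the generator expression rather than taken from the repository. Each window holds n consecutive items and successive windows start step positions apart; the trick relies on a being a real sequence (list, range, ...), because every islice iterates over it independently:

list(sliding_window(range(10), 2, 1))
# [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9)]

list(sliding_window(range(10), 3, 2))
# [(0, 1, 2), (2, 3, 4), (4, 5, 6), (6, 7, 8)]

# Passing a one-shot generator instead of a sequence would make the islices
# interleave over a single shared iterator and silently produce wrong windows.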
5e8db67d31ca24508839bfc702b2fbd8a9a2efe6
|
autoroller.py
|
autoroller.py
|
from random import randint
from itertools import chain
def roll_against(table):
'''Return a randomly chosen entry (a dict value) from a roll table.
table must be a dict of the same format as those created by load_table.'''
# Get the range of rolls that the table caters for.
permitted = list(chain(*table.keys()))
max_permitted = max(permitted)
min_permitted = min(permitted)
    # Generate a random integer bounded by the maximum and minimum permitted
# rolls.
roll = randint(min_permitted, max_permitted)
# Check which event the roll corresponds to and return the description of
# that event.
for entry in table.items():
if roll in entry[0]:
return entry[1]
|
Add function to roll against a loaded roll table
|
Add function to roll against a loaded roll table
As referenced in 1f0decf & 6500a93, add function to 'roll against' a
table generated by table_loader.load_table and return the description
of the event associated with the roll.
|
Python
|
mit
|
whonut/Random-Table-Roller,whonut/Random-Table-Roller,whonut/Random-Table-Roller
|
Add function to roll against a loaded roll table
As referenced in 1f0decf & 6500a93, add function to 'roll against' a
table generated by table_loader.load_table and return the description
of the event associated with the roll.
|
from random import randint
from itertools import chain
def roll_against(table):
'''Return a randomly chosen entry (a dict value) from a roll table.
table must be a dict of the same format as those created by load_table.'''
# Get the range of rolls that the table caters for.
permitted = list(chain(*table.keys()))
max_permitted = max(permitted)
min_permitted = min(permitted)
    # Generate a random integer bounded by the maximum and minimum permitted
# rolls.
roll = randint(min_permitted, max_permitted)
# Check which event the roll corresponds to and return the description of
# that event.
for entry in table.items():
if roll in entry[0]:
return entry[1]
|
<commit_before><commit_msg>Add function to roll against a loaded roll table
As referenced in 1f0decf & 6500a93, add function to 'roll against' a
table generated by table_loader.load_table and return the description
of the event associated with the roll.<commit_after>
|
from random import randint
from itertools import chain
def roll_against(table):
'''Return a randomly chosen entry (a dict value) from a roll table.
table must be a dict of the same format as those created by load_table.'''
# Get the range of rolls that the table caters for.
permitted = list(chain(*table.keys()))
max_permitted = max(permitted)
min_permitted = min(permitted)
    # Generate a random integer bounded by the maximum and minimum permitted
# rolls.
roll = randint(min_permitted, max_permitted)
# Check which event the roll corresponds to and return the description of
# that event.
for entry in table.items():
if roll in entry[0]:
return entry[1]
|
Add function to roll against a loaded roll table
As referenced in 1f0decf & 6500a93, add function to 'roll against' a
table generated by table_loader.load_table and return the description
of the event associated with the roll.from random import randint
from itertools import chain
def roll_against(table):
'''Return a randomly chosen entry (a dict value) from a roll table.
table must be a dict of the same format as those created by load_table.'''
# Get the range of rolls that the table caters for.
permitted = list(chain(*table.keys()))
max_permitted = max(permitted)
min_permitted = min(permitted)
    # Generate a random integer bounded by the maximum and minimum permitted
# rolls.
roll = randint(min_permitted, max_permitted)
# Check which event the roll corresponds to and return the description of
# that event.
for entry in table.items():
if roll in entry[0]:
return entry[1]
|
<commit_before><commit_msg>Add function to roll against a loaded roll table
As referenced in 1f0decf & 6500a93, add function to 'roll against' a
table generated by table_loader.load_table and return the description
of the event associated with the roll.<commit_after>from random import randint
from itertools import chain
def roll_against(table):
'''Return a randomly chosen entry (a dict value) from a roll table.
table must be a dict of the same format as those created by load_table.'''
# Get the range of rolls that the table caters for.
permitted = list(chain(*table.keys()))
max_permitted = max(permitted)
min_permitted = min(permitted)
    # Generate a random integer bounded by the maximum and minimum permitted
# rolls.
roll = randint(min_permitted, max_permitted)
# Check which event the roll corresponds to and return the description of
# that event.
for entry in table.items():
if roll in entry[0]:
return entry[1]
|
|
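The roll_against record above only implies the expected table shape through its indexing: keys must be iterables of the permitted roll values and values are the event descriptions, with the exact format defined by the repository's load_table helper, which is not part of this record. A hypothetical table of that shape, for illustration only:

encounter_table = {
    (1, 2, 3): "Nothing happens",            # rolls 1-3
    (4, 5): "A wandering merchant appears",  # rolls 4-5
    (6,): "Goblin ambush!",                  # roll 6
}
print(roll_against(encounter_table))  # e.g. "A wandering merchant appears"

Tuples are used as keys here because dict keys must be hashable; any iterable of integers that supports membership tests (for example Python 3 range objects) would satisfy the function equally well.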
ee5a0017277b49e3a4a27a2d571c10f86563023c
|
examples/plotPtcls.py
|
examples/plotPtcls.py
|
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def get_data(i):
return np.load('ptcls' + str(i) + '.npy')
def in_range(lx, position):
return position[0] < lx and position[0] > -lx and position[1] < lx and position[1] > -lx and position[2] < lx and position[2] > -lx
def plot_data_3d(i, lx = 2.0e-4, show_electrons=False):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-lx, lx)
ax.set_ylim3d(-lx, lx)
ax.set_zlim3d(-lx, lx)
data = get_data(i)
n_ions = data.shape[1]/2
ions = data[0:3,n_ions:]
ions_in_range = ions[:,np.where([in_range(lx, ions[:,i]) for i in range(n_ions)])]
electrons = data[0:3,n_ions:]
electrons_in_range = electrons[:,np.where([in_range(lx, electrons[:,e]) for e in range(n_ions)])]
if show_electrons:
ax.scatter(electrons_in_range[0],electrons_in_range[1],electrons_in_range[2],c='r',marker='.',s=5,lw=0)
ax.scatter(ions_in_range[0],ions_in_range[1],ions_in_range[2],c='b',marker='.',s=10,lw=0)
plt.show()
def plot_data(i, lx = 2.0e-4, show_electrons=False):
data = get_data(i)
n_ions = data.shape[1]/2
ions = data[0:3,:n_ions]
#ions_in_range = ions[:,np.where([in_range(lx, ions[:,i]) for i in range(n_ions)])]
ions_in_range = ions
electrons = data[0:3,n_ions:]
electrons_in_range = electrons[:,np.where([in_range(lx, electrons[:,e]) for e in range(n_ions)])]
if show_electrons:
plt.plot(electrons_in_range[0],electrons_in_range[1],c='r',marker='.',ms=5,lw=0)
plt.plot(ions_in_range[0],ions_in_range[1],c='b',marker='.',ms=10,lw=0)
plt.show()
|
Add a few utility functions for plotting particle distributions.
|
Add a few utility functions for plotting particle distributions.
|
Python
|
mit
|
Tech-XCorp/ultracold-ions,hosseinsadeghi/ultracold-ions,Tech-XCorp/ultracold-ions,hosseinsadeghi/ultracold-ions
|
Add a few utility functions for plotting particle distributions.
|
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def get_data(i):
return np.load('ptcls' + str(i) + '.npy')
def in_range(lx, position):
return position[0] < lx and position[0] > -lx and position[1] < lx and position[1] > -lx and position[2] < lx and position[2] > -lx
def plot_data_3d(i, lx = 2.0e-4, show_electrons=False):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-lx, lx)
ax.set_ylim3d(-lx, lx)
ax.set_zlim3d(-lx, lx)
data = get_data(i)
n_ions = data.shape[1]/2
ions = data[0:3,n_ions:]
ions_in_range = ions[:,np.where([in_range(lx, ions[:,i]) for i in range(n_ions)])]
electrons = data[0:3,n_ions:]
electrons_in_range = electrons[:,np.where([in_range(lx, electrons[:,e]) for e in range(n_ions)])]
if show_electrons:
ax.scatter(electrons_in_range[0],electrons_in_range[1],electrons_in_range[2],c='r',marker='.',s=5,lw=0)
ax.scatter(ions_in_range[0],ions_in_range[1],ions_in_range[2],c='b',marker='.',s=10,lw=0)
plt.show()
def plot_data(i, lx = 2.0e-4, show_electrons=False):
data = get_data(i)
n_ions = data.shape[1]/2
ions = data[0:3,:n_ions]
#ions_in_range = ions[:,np.where([in_range(lx, ions[:,i]) for i in range(n_ions)])]
ions_in_range = ions
electrons = data[0:3,n_ions:]
electrons_in_range = electrons[:,np.where([in_range(lx, electrons[:,e]) for e in range(n_ions)])]
if show_electrons:
plt.plot(electrons_in_range[0],electrons_in_range[1],c='r',marker='.',ms=5,lw=0)
plt.plot(ions_in_range[0],ions_in_range[1],c='b',marker='.',ms=10,lw=0)
plt.show()
|
<commit_before><commit_msg>Add a few utility functions for plotting particle distributions.<commit_after>
|
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def get_data(i):
return np.load('ptcls' + str(i) + '.npy')
def in_range(lx, position):
return position[0] < lx and position[0] > -lx and position[1] < lx and position[1] > -lx and position[2] < lx and position[2] > -lx
def plot_data_3d(i, lx = 2.0e-4, show_electrons=False):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-lx, lx)
ax.set_ylim3d(-lx, lx)
ax.set_zlim3d(-lx, lx)
data = get_data(i)
n_ions = data.shape[1]/2
ions = data[0:3,n_ions:]
ions_in_range = ions[:,np.where([in_range(lx, ions[:,i]) for i in range(n_ions)])]
electrons = data[0:3,n_ions:]
electrons_in_range = electrons[:,np.where([in_range(lx, electrons[:,e]) for e in range(n_ions)])]
if show_electrons:
ax.scatter(electrons_in_range[0],electrons_in_range[1],electrons_in_range[2],c='r',marker='.',s=5,lw=0)
ax.scatter(ions_in_range[0],ions_in_range[1],ions_in_range[2],c='b',marker='.',s=10,lw=0)
plt.show()
def plot_data(i, lx = 2.0e-4, show_electrons=False):
data = get_data(i)
n_ions = data.shape[1]/2
ions = data[0:3,:n_ions]
#ions_in_range = ions[:,np.where([in_range(lx, ions[:,i]) for i in range(n_ions)])]
ions_in_range = ions
electrons = data[0:3,n_ions:]
electrons_in_range = electrons[:,np.where([in_range(lx, electrons[:,e]) for e in range(n_ions)])]
if show_electrons:
plt.plot(electrons_in_range[0],electrons_in_range[1],c='r',marker='.',ms=5,lw=0)
plt.plot(ions_in_range[0],ions_in_range[1],c='b',marker='.',ms=10,lw=0)
plt.show()
|
Add a few utility functions for plotting particle distributions.import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def get_data(i):
return np.load('ptcls' + str(i) + '.npy')
def in_range(lx, position):
return position[0] < lx and position[0] > -lx and position[1] < lx and position[1] > -lx and position[2] < lx and position[2] > -lx
def plot_data_3d(i, lx = 2.0e-4, show_electrons=False):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-lx, lx)
ax.set_ylim3d(-lx, lx)
ax.set_zlim3d(-lx, lx)
data = get_data(i)
n_ions = data.shape[1]/2
ions = data[0:3,n_ions:]
ions_in_range = ions[:,np.where([in_range(lx, ions[:,i]) for i in range(n_ions)])]
electrons = data[0:3,n_ions:]
electrons_in_range = electrons[:,np.where([in_range(lx, electrons[:,e]) for e in range(n_ions)])]
if show_electrons:
ax.scatter(electrons_in_range[0],electrons_in_range[1],electrons_in_range[2],c='r',marker='.',s=5,lw=0)
ax.scatter(ions_in_range[0],ions_in_range[1],ions_in_range[2],c='b',marker='.',s=10,lw=0)
plt.show()
def plot_data(i, lx = 2.0e-4, show_electrons=False):
data = get_data(i)
n_ions = data.shape[1]/2
ions = data[0:3,:n_ions]
#ions_in_range = ions[:,np.where([in_range(lx, ions[:,i]) for i in range(n_ions)])]
ions_in_range = ions
electrons = data[0:3,n_ions:]
electrons_in_range = electrons[:,np.where([in_range(lx, electrons[:,e]) for e in range(n_ions)])]
if show_electrons:
plt.plot(electrons_in_range[0],electrons_in_range[1],c='r',marker='.',ms=5,lw=0)
plt.plot(ions_in_range[0],ions_in_range[1],c='b',marker='.',ms=10,lw=0)
plt.show()
|
<commit_before><commit_msg>Add a few utility functions for plotting particle distributions.<commit_after>import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def get_data(i):
return np.load('ptcls' + str(i) + '.npy')
def in_range(lx, position):
return position[0] < lx and position[0] > -lx and position[1] < lx and position[1] > -lx and position[2] < lx and position[2] > -lx
def plot_data_3d(i, lx = 2.0e-4, show_electrons=False):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-lx, lx)
ax.set_ylim3d(-lx, lx)
ax.set_zlim3d(-lx, lx)
data = get_data(i)
n_ions = data.shape[1]/2
ions = data[0:3,n_ions:]
ions_in_range = ions[:,np.where([in_range(lx, ions[:,i]) for i in range(n_ions)])]
electrons = data[0:3,n_ions:]
electrons_in_range = electrons[:,np.where([in_range(lx, electrons[:,e]) for e in range(n_ions)])]
if show_electrons:
ax.scatter(electrons_in_range[0],electrons_in_range[1],electrons_in_range[2],c='r',marker='.',s=5,lw=0)
ax.scatter(ions_in_range[0],ions_in_range[1],ions_in_range[2],c='b',marker='.',s=10,lw=0)
plt.show()
def plot_data(i, lx = 2.0e-4, show_electrons=False):
data = get_data(i)
n_ions = data.shape[1]/2
ions = data[0:3,:n_ions]
#ions_in_range = ions[:,np.where([in_range(lx, ions[:,i]) for i in range(n_ions)])]
ions_in_range = ions
electrons = data[0:3,n_ions:]
electrons_in_range = electrons[:,np.where([in_range(lx, electrons[:,e]) for e in range(n_ions)])]
if show_electrons:
plt.plot(electrons_in_range[0],electrons_in_range[1],c='r',marker='.',ms=5,lw=0)
plt.plot(ions_in_range[0],ions_in_range[1],c='b',marker='.',ms=10,lw=0)
plt.show()
|
|
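The plotting helpers in the record above never state the on-disk layout they expect; from the indexing one can infer files named ptcls<i>.npy holding an array with particle positions in rows 0-2 and one column per particle, the first half of the columns being ions and the second half electrons, and the bare data.shape[1]/2 index suggests the code assumes Python 2 integer division. Under those assumptions, a throwaway smoke test in a session where the functions above are defined might look like this:

import numpy as np

# Fabricate a tiny snapshot: 4 ions followed by 4 electrons, with positions
# drawn inside the default plotting box of +/- 2.0e-4.
np.save('ptcls0.npy', np.random.uniform(-1.5e-4, 1.5e-4, size=(3, 8)))
plot_data(0, lx=2.0e-4, show_electrons=True)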
5033f95757c414bb223e09ee9c2c9b535c89df3e
|
tests/unit/catalogue/test_models.py
|
tests/unit/catalogue/test_models.py
|
import pytest
from django.core.exceptions import ValidationError
from oscar.apps.catalogue import models
def test_product_attributes_can_contain_underscores():
attr = models.ProductAttribute(name="A", code="a_b")
attr.full_clean()
def test_product_attributes_cant_contain_hyphens():
attr = models.ProductAttribute(name="A", code="a-b")
with pytest.raises(ValidationError):
attr.full_clean()
|
Add tests to verify hyphens are not allowed in attribute codes
|
Add tests to verify hyphens are not allowed in attribute codes
|
Python
|
bsd-3-clause
|
binarydud/django-oscar,okfish/django-oscar,solarissmoke/django-oscar,lijoantony/django-oscar,WillisXChen/django-oscar,MatthewWilkes/django-oscar,bnprk/django-oscar,jmt4/django-oscar,Bogh/django-oscar,bnprk/django-oscar,vovanbo/django-oscar,itbabu/django-oscar,spartonia/django-oscar,rocopartners/django-oscar,john-parton/django-oscar,kapari/django-oscar,mexeniz/django-oscar,jmt4/django-oscar,vovanbo/django-oscar,thechampanurag/django-oscar,pdonadeo/django-oscar,taedori81/django-oscar,anentropic/django-oscar,amirrpp/django-oscar,taedori81/django-oscar,WadeYuChen/django-oscar,jlmadurga/django-oscar,nfletton/django-oscar,ka7eh/django-oscar,rocopartners/django-oscar,Jannes123/django-oscar,jmt4/django-oscar,spartonia/django-oscar,bnprk/django-oscar,anentropic/django-oscar,nfletton/django-oscar,anentropic/django-oscar,bschuon/django-oscar,jlmadurga/django-oscar,saadatqadri/django-oscar,vovanbo/django-oscar,faratro/django-oscar,thechampanurag/django-oscar,sasha0/django-oscar,thechampanurag/django-oscar,mexeniz/django-oscar,binarydud/django-oscar,dongguangming/django-oscar,MatthewWilkes/django-oscar,sasha0/django-oscar,thechampanurag/django-oscar,itbabu/django-oscar,pasqualguerrero/django-oscar,lijoantony/django-oscar,pdonadeo/django-oscar,faratro/django-oscar,nfletton/django-oscar,dongguangming/django-oscar,rocopartners/django-oscar,okfish/django-oscar,WadeYuChen/django-oscar,sonofatailor/django-oscar,nickpack/django-oscar,vovanbo/django-oscar,Bogh/django-oscar,amirrpp/django-oscar,eddiep1101/django-oscar,mexeniz/django-oscar,WadeYuChen/django-oscar,solarissmoke/django-oscar,nfletton/django-oscar,Jannes123/django-oscar,amirrpp/django-oscar,sonofatailor/django-oscar,john-parton/django-oscar,michaelkuty/django-oscar,WillisXChen/django-oscar,kapari/django-oscar,sasha0/django-oscar,pasqualguerrero/django-oscar,QLGu/django-oscar,QLGu/django-oscar,eddiep1101/django-oscar,spartonia/django-oscar,lijoantony/django-oscar,lijoantony/django-oscar,anentropic/django-oscar,binarydud/django-oscar,Jannes123/django-oscar,taedori81/django-oscar,dongguangming/django-oscar,pasqualguerrero/django-oscar,itbabu/django-oscar,amirrpp/django-oscar,django-oscar/django-oscar,rocopartners/django-oscar,Bogh/django-oscar,MatthewWilkes/django-oscar,ka7eh/django-oscar,pdonadeo/django-oscar,itbabu/django-oscar,bschuon/django-oscar,WillisXChen/django-oscar,saadatqadri/django-oscar,ka7eh/django-oscar,jmt4/django-oscar,QLGu/django-oscar,bschuon/django-oscar,QLGu/django-oscar,dongguangming/django-oscar,spartonia/django-oscar,WillisXChen/django-oscar,WadeYuChen/django-oscar,jlmadurga/django-oscar,bschuon/django-oscar,sasha0/django-oscar,pdonadeo/django-oscar,WillisXChen/django-oscar,Bogh/django-oscar,WillisXChen/django-oscar,michaelkuty/django-oscar,saadatqadri/django-oscar,sonofatailor/django-oscar,john-parton/django-oscar,faratro/django-oscar,binarydud/django-oscar,jlmadurga/django-oscar,ka7eh/django-oscar,nickpack/django-oscar,okfish/django-oscar,taedori81/django-oscar,sonofatailor/django-oscar,MatthewWilkes/django-oscar,eddiep1101/django-oscar,john-parton/django-oscar,faratro/django-oscar,michaelkuty/django-oscar,michaelkuty/django-oscar,solarissmoke/django-oscar,nickpack/django-oscar,kapari/django-oscar,nickpack/django-oscar,solarissmoke/django-oscar,saadatqadri/django-oscar,django-oscar/django-oscar,kapari/django-oscar,bnprk/django-oscar,Jannes123/django-oscar,okfish/django-oscar,pasqualguerrero/django-oscar,mexeniz/django-oscar,django-oscar/django-oscar,eddiep1101/django-oscar,django-oscar/django-oscar
|
Add tests to verify hyphens are not allowed in attribute codes
|
import pytest
from django.core.exceptions import ValidationError
from oscar.apps.catalogue import models
def test_product_attributes_can_contain_underscores():
attr = models.ProductAttribute(name="A", code="a_b")
attr.full_clean()
def test_product_attributes_cant_contain_hyphens():
attr = models.ProductAttribute(name="A", code="a-b")
with pytest.raises(ValidationError):
attr.full_clean()
|
<commit_before><commit_msg>Add tests to verify hyphens are not allowed in attribute codes<commit_after>
|
import pytest
from django.core.exceptions import ValidationError
from oscar.apps.catalogue import models
def test_product_attributes_can_contain_underscores():
attr = models.ProductAttribute(name="A", code="a_b")
attr.full_clean()
def test_product_attributes_cant_contain_hyphens():
attr = models.ProductAttribute(name="A", code="a-b")
with pytest.raises(ValidationError):
attr.full_clean()
|
Add tests to verify hyphens are not allowed in attribute codesimport pytest
from django.core.exceptions import ValidationError
from oscar.apps.catalogue import models
def test_product_attributes_can_contain_underscores():
attr = models.ProductAttribute(name="A", code="a_b")
attr.full_clean()
def test_product_attributes_cant_contain_hyphens():
attr = models.ProductAttribute(name="A", code="a-b")
with pytest.raises(ValidationError):
attr.full_clean()
|
<commit_before><commit_msg>Add tests to verify hyphens are not allowed in attribute codes<commit_after>import pytest
from django.core.exceptions import ValidationError
from oscar.apps.catalogue import models
def test_product_attributes_can_contain_underscores():
attr = models.ProductAttribute(name="A", code="a_b")
attr.full_clean()
def test_product_attributes_cant_contain_hyphens():
attr = models.ProductAttribute(name="A", code="a-b")
with pytest.raises(ValidationError):
attr.full_clean()
|
|
506175af6040ff159ca32836c71792d00649e9f6
|
build-js.py
|
build-js.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import subprocess
options = ["--compress", "--mangle", "--screw-ie8", "--output"]
outputFile = "codetriangle.min.js"
outputPath = os.path.join(os.getcwd(), "build", "js", outputFile)
exceptions = ["lib/prism.js"]
finalCommand = ["uglifyjs"]
os.chdir("js")
# Get the third-party libraries to be minified
for f in os.listdir(os.path.join(os.getcwd(), "lib")):
f = "lib/{0}".format(f)
if f not in exceptions and not os.path.isdir(f):
finalCommand.append(f)
# Get the files to be minified
for f in os.listdir(os.getcwd()):
if f not in exceptions and f != outputFile and not os.path.isdir(f):
finalCommand.append(f)
# Add the app options and output file
finalCommand += options
finalCommand.append(outputPath)
print(finalCommand)
# Build the JS file
subprocess.call(finalCommand, shell=True)
print("JS built successfully.")
raise SystemExit(0)
|
Add Python-based JS build script
|
Add Python-based JS build script
Still needs to be ported to JS
|
Python
|
mit
|
le717/CodeTriangle.me,le717/CodeTriangle,le717/CodeTriangle,le717/CodeTriangle.me,le717/CodeTriangle.me
|
Add Python-based JS build script
Still needs to be ported to JS
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import subprocess
options = ["--compress", "--mangle", "--screw-ie8", "--output"]
outputFile = "codetriangle.min.js"
outputPath = os.path.join(os.getcwd(), "build", "js", outputFile)
exceptions = ["lib/prism.js"]
finalCommand = ["uglifyjs"]
os.chdir("js")
# Get the third-party libraries to be minified
for f in os.listdir(os.path.join(os.getcwd(), "lib")):
f = "lib/{0}".format(f)
if f not in exceptions and not os.path.isdir(f):
finalCommand.append(f)
# Get the files to be minified
for f in os.listdir(os.getcwd()):
if f not in exceptions and f != outputFile and not os.path.isdir(f):
finalCommand.append(f)
# Add the app options and output file
finalCommand += options
finalCommand.append(outputPath)
print(finalCommand)
# Build the JS file
subprocess.call(finalCommand, shell=True)
print("JS built successfully.")
raise SystemExit(0)
|
<commit_before><commit_msg>Add Python-based JS build script
Still needs to be ported to JS<commit_after>
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import subprocess
options = ["--compress", "--mangle", "--screw-ie8", "--output"]
outputFile = "codetriangle.min.js"
outputPath = os.path.join(os.getcwd(), "build", "js", outputFile)
exceptions = ["lib/prism.js"]
finalCommand = ["uglifyjs"]
os.chdir("js")
# Get the third-party libraries to be minified
for f in os.listdir(os.path.join(os.getcwd(), "lib")):
f = "lib/{0}".format(f)
if f not in exceptions and not os.path.isdir(f):
finalCommand.append(f)
# Get the files to be minified
for f in os.listdir(os.getcwd()):
if f not in exceptions and f != outputFile and not os.path.isdir(f):
finalCommand.append(f)
# Add the app options and output file
finalCommand += options
finalCommand.append(outputPath)
print(finalCommand)
# Build the JS file
subprocess.call(finalCommand, shell=True)
print("JS built successfully.")
raise SystemExit(0)
|
Add Python-based JS build script
Still needs to be ported to JS#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import subprocess
options = ["--compress", "--mangle", "--screw-ie8", "--output"]
outputFile = "codetriangle.min.js"
outputPath = os.path.join(os.getcwd(), "build", "js", outputFile)
exceptions = ["lib/prism.js"]
finalCommand = ["uglifyjs"]
os.chdir("js")
# Get the third-party libraries to be minified
for f in os.listdir(os.path.join(os.getcwd(), "lib")):
f = "lib/{0}".format(f)
if f not in exceptions and not os.path.isdir(f):
finalCommand.append(f)
# Get the files to be minified
for f in os.listdir(os.getcwd()):
if f not in exceptions and f != outputFile and not os.path.isdir(f):
finalCommand.append(f)
# Add the app options and output file
finalCommand += options
finalCommand.append(outputPath)
print(finalCommand)
# Build the JS file
subprocess.call(finalCommand, shell=True)
print("JS built successfully.")
raise SystemExit(0)
|
<commit_before><commit_msg>Add Python-based JS build script
Still needs to be ported to JS<commit_after>#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import subprocess
options = ["--compress", "--mangle", "--screw-ie8", "--output"]
outputFile = "codetriangle.min.js"
outputPath = os.path.join(os.getcwd(), "build", "js", outputFile)
exceptions = ["lib/prism.js"]
finalCommand = ["uglifyjs"]
os.chdir("js")
# Get the third-party libraries to be minified
for f in os.listdir(os.path.join(os.getcwd(), "lib")):
f = "lib/{0}".format(f)
if f not in exceptions and not os.path.isdir(f):
finalCommand.append(f)
# Get the files to be minified
for f in os.listdir(os.getcwd()):
if f not in exceptions and f != outputFile and not os.path.isdir(f):
finalCommand.append(f)
# Add the app options and output file
finalCommand += options
finalCommand.append(outputPath)
print(finalCommand)
# Build the JS file
subprocess.call(finalCommand, shell=True)
print("JS built successfully.")
raise SystemExit(0)
|
|
b47f49bcb53b707bf9ef1b981b9851ea08805237
|
autocorr_spectra.py
|
autocorr_spectra.py
|
# -*- coding: utf-8 -*-
"""Module to compute the autocorrelation of quantities along spectra"""
import _autocorr_spectra_priv
def autocorr_spectra(slist, spos, pixsz, nbins=100):
"""
Find the autocorrelation function from a list of spectra
Spectra are assumed to be along the same axis.
slist - list of quantity along spectra to autocorrelate. npix * nspectra
spos - positions of the spectra: 2x nspectra: (x, y).
nbins - number of bins in output autocorrelation function
pixsz - Size of a pixel in units of the spectra position.
"""
(modes, auto) = _autocorr_spectra_priv.autocorr_spectra(slist, spos, pixsz, nbins)
auto /= modes
return auto
|
Add module for computing autocorrelation of spectra
|
Add module for computing autocorrelation of spectra
|
Python
|
mit
|
sbird/vw_spectra
|
Add module for computing autocorrelation of spectra
|
# -*- coding: utf-8 -*-
"""Module to compute the autocorrelation of quantities along spectra"""
import _autocorr_spectra_priv
def autocorr_spectra(slist, spos, pixsz, nbins=100):
"""
Find the autocorrelation function from a list of spectra
Spectra are assumed to be along the same axis.
slist - list of quantity along spectra to autocorrelate. npix * nspectra
spos - positions of the spectra: 2x nspectra: (x, y).
nbins - number of bins in output autocorrelation function
pixsz - Size of a pixel in units of the spectra position.
"""
(modes, auto) = _autocorr_spectra_priv.autocorr_spectra(slist, spos, pixsz, nbins)
auto /= modes
return auto
|
<commit_before><commit_msg>Add module for computing autocorrelation of spectra<commit_after>
|
# -*- coding: utf-8 -*-
"""Module to compute the autocorrelation of quantities along spectra"""
import _autocorr_spectra_priv
def autocorr_spectra(slist, spos, pixsz, nbins=100):
"""
Find the autocorrelation function from a list of spectra
Spectra are assumed to be along the same axis.
slist - list of quantity along spectra to autocorrelate. npix * nspectra
spos - positions of the spectra: 2x nspectra: (x, y).
nbins - number of bins in output autocorrelation function
pixsz - Size of a pixel in units of the spectra position.
"""
(modes, auto) = _autocorr_spectra_priv.autocorr_spectra(slist, spos, pixsz, nbins)
auto /= modes
return auto
|
Add module for computing autocorrelation of spectra# -*- coding: utf-8 -*-
"""Module to compute the autocorrelation of quantities along spectra"""
import _autocorr_spectra_priv
def autocorr_spectra(slist, spos, pixsz, nbins=100):
"""
Find the autocorrelation function from a list of spectra
Spectra are assumed to be along the same axis.
slist - list of quantity along spectra to autocorrelate. npix * nspectra
spos - positions of the spectra: 2x nspectra: (x, y).
nbins - number of bins in output autocorrelation function
pixsz - Size of a pixel in units of the spectra position.
"""
(modes, auto) = _autocorr_spectra_priv.autocorr_spectra(slist, spos, pixsz, nbins)
auto /= modes
return auto
|
<commit_before><commit_msg>Add module for computing autocorrelation of spectra<commit_after># -*- coding: utf-8 -*-
"""Module to compute the autocorrelation of quantities along spectra"""
import _autocorr_spectra_priv
def autocorr_spectra(slist, spos, pixsz, nbins=100):
"""
Find the autocorrelation function from a list of spectra
Spectra are assumed to be along the same axis.
slist - list of quantity along spectra to autocorrelate. npix * nspectra
spos - positions of the spectra: 2x nspectra: (x, y).
nbins - number of bins in output autocorrelation function
pixsz - Size of a pixel in units of the spectra position.
"""
(modes, auto) = _autocorr_spectra_priv.autocorr_spectra(slist, spos, pixsz, nbins)
auto /= modes
return auto
|
|
2d96cd4629ae0ab026c14508634deeab521996cd
|
tests/test_api_2stepinit.py
|
tests/test_api_2stepinit.py
|
# -*- coding: utf-8 -*-
import json
import binascii
from .base import MyTestCase
from privacyidea.lib.tokens.HMAC import HmacOtp
class TwoStepInitTestCase(MyTestCase):
"""
test the 2stepinit process.
Here we enroll an HOTP token. One part of the secret key is generated by
privacyIDEA and the second part is generated by the client.
A successful authentication with the new key is performed.
"""
def test_01_init_token(self):
with self.app.test_request_context('/token/init',
method='POST',
data={"type": "hotp",
"genkey": "1",
"2stepinit": "1"},
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status") is True, result)
self.assertTrue(result.get("value") is True, result)
detail = json.loads(res.data).get("detail")
serial = detail.get("serial")
otpkey_url = detail.get("otpkey", {}).get("value")
server_component = otpkey_url.split("/")[2]
client_component = "AAAAAAAA"
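# Step 2 of 2stepinit: send the client-generated share for the same serial to finish enrollment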
with self.app.test_request_context('/token/init',
method='POST',
data={"type": "hotp",
"serial": serial,
"otpkey": client_component},
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status") is True, result)
self.assertTrue(result.get("value") is True, result)
detail = json.loads(res.data).get("detail")
otpkey_url = detail.get("otpkey", {}).get("value")
otpkey = otpkey_url.split("/")[2]
# Now try to authenticate
otpkey_bin = binascii.unhexlify(otpkey)
otp_value = HmacOtp().generate(key=otpkey_bin, counter=1)
with self.app.test_request_context('/validate/check',
method='POST',
data={"serial": serial,
"pass": otp_value}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertEqual(result.get("status"), True)
self.assertEqual(result.get("value"), True)
|
Test an enrollment and successful auth.
|
Test an enrollment and successful auth.
Working on #627
|
Python
|
agpl-3.0
|
privacyidea/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,wheldom01/privacyidea,jh23453/privacyidea,privacyidea/privacyidea,wheldom01/privacyidea,privacyidea/privacyidea,jh23453/privacyidea,jh23453/privacyidea,wheldom01/privacyidea,jh23453/privacyidea,jh23453/privacyidea,wheldom01/privacyidea,privacyidea/privacyidea,wheldom01/privacyidea,jh23453/privacyidea,wheldom01/privacyidea
|
Test an enrollment and successful auth.
Working on #627
|
# -*- coding: utf-8 -*-
import json
import binascii
from .base import MyTestCase
from privacyidea.lib.tokens.HMAC import HmacOtp
class TwoStepInitTestCase(MyTestCase):
"""
test the 2stepinit process.
Here we enroll an HOTP token. One part of the secret key is generated by
privacyIDEA and the second part is generated by the client.
A successful authentication with the new key is performed.
"""
def test_01_init_token(self):
with self.app.test_request_context('/token/init',
method='POST',
data={"type": "hotp",
"genkey": "1",
"2stepinit": "1"},
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status") is True, result)
self.assertTrue(result.get("value") is True, result)
detail = json.loads(res.data).get("detail")
serial = detail.get("serial")
otpkey_url = detail.get("otpkey", {}).get("value")
server_component = otpkey_url.split("/")[2]
client_component = "AAAAAAAA"
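# Step 2 of 2stepinit: send the client-generated share for the same serial to finish enrollment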
with self.app.test_request_context('/token/init',
method='POST',
data={"type": "hotp",
"serial": serial,
"otpkey": client_component},
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status") is True, result)
self.assertTrue(result.get("value") is True, result)
detail = json.loads(res.data).get("detail")
otpkey_url = detail.get("otpkey", {}).get("value")
otpkey = otpkey_url.split("/")[2]
# Now try to authenticate
otpkey_bin = binascii.unhexlify(otpkey)
otp_value = HmacOtp().generate(key=otpkey_bin, counter=1)
with self.app.test_request_context('/validate/check',
method='POST',
data={"serial": serial,
"pass": otp_value}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertEqual(result.get("status"), True)
self.assertEqual(result.get("value"), True)
|
<commit_before><commit_msg>Test an enrollment and successful auth.
Working on #627<commit_after>
|
# -*- coding: utf-8 -*-
import json
import binascii
from .base import MyTestCase
from privacyidea.lib.tokens.HMAC import HmacOtp
class TwoStepInitTestCase(MyTestCase):
"""
test the 2stepinit process.
Here we enroll an HOTP token. One part of the secret key is generated by
privacyIDEA and the second part is generated by the client.
A successful authentication with the new key is performed.
"""
def test_01_init_token(self):
with self.app.test_request_context('/token/init',
method='POST',
data={"type": "hotp",
"genkey": "1",
"2stepinit": "1"},
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status") is True, result)
self.assertTrue(result.get("value") is True, result)
detail = json.loads(res.data).get("detail")
serial = detail.get("serial")
otpkey_url = detail.get("otpkey", {}).get("value")
server_component = otpkey_url.split("/")[2]
client_component = "AAAAAAAA"
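# Step 2 of 2stepinit: send the client-generated share for the same serial to finish enrollment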
with self.app.test_request_context('/token/init',
method='POST',
data={"type": "hotp",
"serial": serial,
"otpkey": client_component},
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status") is True, result)
self.assertTrue(result.get("value") is True, result)
detail = json.loads(res.data).get("detail")
otpkey_url = detail.get("otpkey", {}).get("value")
otpkey = otpkey_url.split("/")[2]
# Now try to authenticate
otpkey_bin = binascii.unhexlify(otpkey)
otp_value = HmacOtp().generate(key=otpkey_bin, counter=1)
with self.app.test_request_context('/validate/check',
method='POST',
data={"serial": serial,
"pass": otp_value}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertEqual(result.get("status"), True)
self.assertEqual(result.get("value"), True)
|
Test an enrollment and successful auth.
Working on #627# -*- coding: utf-8 -*-
import json
import binascii
from .base import MyTestCase
from privacyidea.lib.tokens.HMAC import HmacOtp
class TwoStepInitTestCase(MyTestCase):
"""
test the 2stepinit process.
Here we enroll an HOTP token. One part of the secret key is generated by
privacyIDEA and the second part is generated by the client.
A successful authentication with the new key is performed.
"""
def test_01_init_token(self):
with self.app.test_request_context('/token/init',
method='POST',
data={"type": "hotp",
"genkey": "1",
"2stepinit": "1"},
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status") is True, result)
self.assertTrue(result.get("value") is True, result)
detail = json.loads(res.data).get("detail")
serial = detail.get("serial")
otpkey_url = detail.get("otpkey", {}).get("value")
server_component = otpkey_url.split("/")[2]
client_component = "AAAAAAAA"
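# Step 2 of 2stepinit: send the client-generated share for the same serial to finish enrollment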
with self.app.test_request_context('/token/init',
method='POST',
data={"type": "hotp",
"serial": serial,
"otpkey": client_component},
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status") is True, result)
self.assertTrue(result.get("value") is True, result)
detail = json.loads(res.data).get("detail")
otpkey_url = detail.get("otpkey", {}).get("value")
otpkey = otpkey_url.split("/")[2]
# Now try to authenticate
otpkey_bin = binascii.unhexlify(otpkey)
otp_value = HmacOtp().generate(key=otpkey_bin, counter=1)
with self.app.test_request_context('/validate/check',
method='POST',
data={"serial": serial,
"pass": otp_value}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertEqual(result.get("status"), True)
self.assertEqual(result.get("value"), True)
|
<commit_before><commit_msg>Test an enrollment and successful auth.
Working on #627<commit_after># -*- coding: utf-8 -*-
import json
import binascii
from .base import MyTestCase
from privacyidea.lib.tokens.HMAC import HmacOtp
class TwoStepInitTestCase(MyTestCase):
"""
test the 2stepinit process.
Here we enroll an HOTP token. One part of the secret key is generated by
privacyIDEA and the second part is generated by the client.
A successful authentication with the new key is performed.
"""
def test_01_init_token(self):
with self.app.test_request_context('/token/init',
method='POST',
data={"type": "hotp",
"genkey": "1",
"2stepinit": "1"},
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status") is True, result)
self.assertTrue(result.get("value") is True, result)
detail = json.loads(res.data).get("detail")
serial = detail.get("serial")
otpkey_url = detail.get("otpkey", {}).get("value")
server_component = otpkey_url.split("/")[2]
client_component = "AAAAAAAA"
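# Step 2 of 2stepinit: send the client-generated share for the same serial to finish enrollment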
with self.app.test_request_context('/token/init',
method='POST',
data={"type": "hotp",
"serial": serial,
"otpkey": client_component},
headers={'Authorization': self.at}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertTrue(result.get("status") is True, result)
self.assertTrue(result.get("value") is True, result)
detail = json.loads(res.data).get("detail")
otpkey_url = detail.get("otpkey", {}).get("value")
otpkey = otpkey_url.split("/")[2]
# Now try to authenticate
otpkey_bin = binascii.unhexlify(otpkey)
otp_value = HmacOtp().generate(key=otpkey_bin, counter=1)
with self.app.test_request_context('/validate/check',
method='POST',
data={"serial": serial,
"pass": otp_value}):
res = self.app.full_dispatch_request()
self.assertTrue(res.status_code == 200, res)
result = json.loads(res.data).get("result")
self.assertEqual(result.get("status"), True)
self.assertEqual(result.get("value"), True)
|
|
9fd7b4477cc8420af1b8d120cd4386e8a0cb82d1
|
tests/test_plugin_scroll.py
|
tests/test_plugin_scroll.py
|
#!/usr/bin/env python
import app_config
app_config.DATABASE_NAME = 'carebot_test.db'
app_config.DEFAULT_CONFIG_PATH = 'tests/config_test.yml'
try:
import unittest2 as unittest
except ImportError:
import unittest
import datetime
from plugins.npr.scrolldepth import NPRScrollDepth
from util.config import Config
class TestNPRSCrollDepth(unittest.TestCase):
def test_fill_in_max(self):
test_data = [[1, 100, 3], [1, 200, 3], [1, 500, 3], [1, 200, 3]]
results = NPRScrollDepth.fill_in_max(test_data)
self.assertEqual(results[0][1], 500)
self.assertEqual(results[1][1], 500)
self.assertEqual(results[2][1], 500)
self.assertEqual(results[3][1], 200)
|
Add simple test to scroll plugin
|
Add simple test to scroll plugin
|
Python
|
mit
|
thecarebot/carebot,thecarebot/carebot,thecarebot/carebot
|
Add simple test to scroll plugin
|
#!/usr/bin/env python
import app_config
app_config.DATABASE_NAME = 'carebot_test.db'
app_config.DEFAULT_CONFIG_PATH = 'tests/config_test.yml'
try:
import unittest2 as unittest
except ImportError:
import unittest
import datetime
from plugins.npr.scrolldepth import NPRScrollDepth
from util.config import Config
class TestNPRSCrollDepth(unittest.TestCase):
def test_fill_in_max(self):
test_data = [[1, 100, 3], [1, 200, 3], [1, 500, 3], [1, 200, 3]]
results = NPRScrollDepth.fill_in_max(test_data)
self.assertEqual(results[0][1], 500)
self.assertEqual(results[1][1], 500)
self.assertEqual(results[2][1], 500)
self.assertEqual(results[3][1], 200)
|
<commit_before><commit_msg>Add simple test to scroll plugin<commit_after>
|
#!/usr/bin/env python
import app_config
app_config.DATABASE_NAME = 'carebot_test.db'
app_config.DEFAULT_CONFIG_PATH = 'tests/config_test.yml'
try:
import unittest2 as unittest
except ImportError:
import unittest
import datetime
from plugins.npr.scrolldepth import NPRScrollDepth
from util.config import Config
class TestNPRSCrollDepth(unittest.TestCase):
def test_fill_in_max(self):
test_data = [[1, 100, 3], [1, 200, 3], [1, 500, 3], [1, 200, 3]]
results = NPRScrollDepth.fill_in_max(test_data)
self.assertEqual(results[0][1], 500)
self.assertEqual(results[1][1], 500)
self.assertEqual(results[2][1], 500)
self.assertEqual(results[3][1], 200)
|
Add simple test to scroll plugin#!/usr/bin/env python
import app_config
app_config.DATABASE_NAME = 'carebot_test.db'
app_config.DEFAULT_CONFIG_PATH = 'tests/config_test.yml'
try:
import unittest2 as unittest
except ImportError:
import unittest
import datetime
from plugins.npr.scrolldepth import NPRScrollDepth
from util.config import Config
class TestNPRSCrollDepth(unittest.TestCase):
def test_fill_in_max(self):
test_data = [[1, 100, 3], [1, 200, 3], [1, 500, 3], [1, 200, 3]]
results = NPRScrollDepth.fill_in_max(test_data)
self.assertEqual(results[0][1], 500)
self.assertEqual(results[1][1], 500)
self.assertEqual(results[2][1], 500)
self.assertEqual(results[3][1], 200)
|
<commit_before><commit_msg>Add simple test to scroll plugin<commit_after>#!/usr/bin/env python
import app_config
app_config.DATABASE_NAME = 'carebot_test.db'
app_config.DEFAULT_CONFIG_PATH = 'tests/config_test.yml'
try:
import unittest2 as unittest
except ImportError:
import unittest
import datetime
from plugins.npr.scrolldepth import NPRScrollDepth
from util.config import Config
class TestNPRSCrollDepth(unittest.TestCase):
def test_fill_in_max(self):
test_data = [[1, 100, 3], [1, 200, 3], [1, 500, 3], [1, 200, 3]]
results = NPRScrollDepth.fill_in_max(test_data)
self.assertEqual(results[0][1], 500)
self.assertEqual(results[1][1], 500)
self.assertEqual(results[2][1], 500)
self.assertEqual(results[3][1], 200)
|
|
1536633a8515db4667ed9ca60fd33d07d42e1bed
|
dbaas/workflow/steps/mysql/region_migration/revoke_nfs_access.py
|
dbaas/workflow/steps/mysql/region_migration/revoke_nfs_access.py
|
# -*- coding: utf-8 -*-
import logging
from util import full_stack
from dbaas_nfsaas.provider import NfsaasProvider
from workflow.steps.util.base import BaseStep
from workflow.exceptions.error_codes import DBAAS_0020
LOG = logging.getLogger(__name__)
class RevokeNFSAccess(BaseStep):
def __unicode__(self):
return "Revoking nfs access..."
def do(self, workflow_dict):
try:
databaseinfra = workflow_dict['databaseinfra']
source_host = workflow_dict['source_hosts'][0]
target_host = source_host.future_host
nfsaas_export_id = source_host.nfsaas_host_attributes.all()[0].nfsaas_export_id
NfsaasProvider.revoke_access(environment=databaseinfra.environment,
plan=databaseinfra.plan,
host=target_host,
export_id=nfsaas_export_id)
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
try:
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
|
Add step to revoke nfs access
|
Add step to revoke nfs access
|
Python
|
bsd-3-clause
|
globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service
|
Add step to revoke nfs access
|
# -*- coding: utf-8 -*-
import logging
from util import full_stack
from dbaas_nfsaas.provider import NfsaasProvider
from workflow.steps.util.base import BaseStep
from workflow.exceptions.error_codes import DBAAS_0020
LOG = logging.getLogger(__name__)
class RevokeNFSAccess(BaseStep):
def __unicode__(self):
return "Revoking nfs access..."
def do(self, workflow_dict):
try:
databaseinfra = workflow_dict['databaseinfra']
source_host = workflow_dict['source_hosts'][0]
target_host = source_host.future_host
nfsaas_export_id = source_host.nfsaas_host_attributes.all()[0].nfsaas_export_id
NfsaasProvider.revoke_access(environment=databaseinfra.environment,
plan=databaseinfra.plan,
host=target_host,
export_id=nfsaas_export_id)
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
try:
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
|
<commit_before><commit_msg>Add step to revoke nfs access<commit_after>
|
# -*- coding: utf-8 -*-
import logging
from util import full_stack
from dbaas_nfsaas.provider import NfsaasProvider
from workflow.steps.util.base import BaseStep
from workflow.exceptions.error_codes import DBAAS_0020
LOG = logging.getLogger(__name__)
class RevokeNFSAccess(BaseStep):
def __unicode__(self):
return "Revoking nfs access..."
def do(self, workflow_dict):
try:
databaseinfra = workflow_dict['databaseinfra']
source_host = workflow_dict['source_hosts'][0]
target_host = source_host.future_host
nfsaas_export_id = source_host.nfsaas_host_attributes.all()[0].nfsaas_export_id
NfsaasProvider.revoke_access(environment=databaseinfra.environment,
plan=databaseinfra.plan,
host=target_host,
export_id=nfsaas_export_id)
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
try:
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
|
Add step to revoke nfs access# -*- coding: utf-8 -*-
import logging
from util import full_stack
from dbaas_nfsaas.provider import NfsaasProvider
from workflow.steps.util.base import BaseStep
from workflow.exceptions.error_codes import DBAAS_0020
LOG = logging.getLogger(__name__)
class RevokeNFSAccess(BaseStep):
def __unicode__(self):
return "Revoking nfs access..."
def do(self, workflow_dict):
try:
databaseinfra = workflow_dict['databaseinfra']
source_host = workflow_dict['source_hosts'][0]
target_host = source_host.future_host
nfsaas_export_id = source_host.nfsaas_host_attributes.all()[0].nfsaas_export_id
NfsaasProvider.revoke_access(environment=databaseinfra.environment,
plan=databaseinfra.plan,
host=target_host,
export_id=nfsaas_export_id)
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
try:
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
|
<commit_before><commit_msg>Add step to revoke nfs access<commit_after># -*- coding: utf-8 -*-
import logging
from util import full_stack
from dbaas_nfsaas.provider import NfsaasProvider
from workflow.steps.util.base import BaseStep
from workflow.exceptions.error_codes import DBAAS_0020
LOG = logging.getLogger(__name__)
class RevokeNFSAccess(BaseStep):
def __unicode__(self):
return "Revoking nfs access..."
def do(self, workflow_dict):
try:
databaseinfra = workflow_dict['databaseinfra']
source_host = workflow_dict['source_hosts'][0]
target_host = source_host.future_host
nfsaas_export_id = source_host.nfsaas_host_attributes.all()[0].nfsaas_export_id
NfsaasProvider.revoke_access(environment=databaseinfra.environment,
plan=databaseinfra.plan,
host=target_host,
export_id=nfsaas_export_id)
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
try:
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
|
|
7c7f673eeb731baf186db66a26147b67b8762eae
|
examples/rpc_with_subhanlers.py
|
examples/rpc_with_subhanlers.py
|
import asyncio
import aiozmq
import aiozmq.rpc
class Handler(aiozmq.rpc.AttrHandler):
def __init__(self, ident):
self.ident = ident
self.subhandler = SubHandler(self.ident, 'subident')
@aiozmq.rpc.method
def a(self):
return (self.ident, 'a')
class SubHandler(aiozmq.rpc.AttrHandler):
def __init__(self, ident, subident):
self.ident = ident
self.subident = subident
@aiozmq.rpc.method
def b(self):
return (self.ident, self.subident, 'b')
@asyncio.coroutine
def go():
server = yield from aiozmq.rpc.start_server(
Handler('ident'), bind='tcp://*:*')
server_addr = next(iter(server.transport.bindings()))
client = yield from aiozmq.rpc.open_client(
connect=server_addr)
ret = yield from client.rpc.a()
assert ('ident', 'a') == ret
ret = yield from client.rpc.subhandler.b()
assert ('ident', 'subident', 'b') == ret
server.close()
client.close()
def main():
asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy())
asyncio.get_event_loop().run_until_complete(go())
print("DONE")
if __name__ == '__main__':
main()
|
Add example for subhandlers of AttrHandler
|
Add example for subhandlers of AttrHandler
|
Python
|
bsd-2-clause
|
MetaMemoryT/aiozmq,aio-libs/aiozmq,claws/aiozmq,asteven/aiozmq
|
Add example for subhandlers of AttrHandler
|
import asyncio
import aiozmq
import aiozmq.rpc
class Handler(aiozmq.rpc.AttrHandler):
def __init__(self, ident):
self.ident = ident
self.subhandler = SubHandler(self.ident, 'subident')
@aiozmq.rpc.method
def a(self):
return (self.ident, 'a')
class SubHandler(aiozmq.rpc.AttrHandler):
def __init__(self, ident, subident):
self.ident = ident
self.subident = subident
@aiozmq.rpc.method
def b(self):
return (self.ident, self.subident, 'b')
@asyncio.coroutine
def go():
server = yield from aiozmq.rpc.start_server(
Handler('ident'), bind='tcp://*:*')
server_addr = next(iter(server.transport.bindings()))
client = yield from aiozmq.rpc.open_client(
connect=server_addr)
ret = yield from client.rpc.a()
assert ('ident', 'a') == ret
ret = yield from client.rpc.subhandler.b()
assert ('ident', 'subident', 'b') == ret
server.close()
client.close()
def main():
asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy())
asyncio.get_event_loop().run_until_complete(go())
print("DONE")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example for subhandlers of AttrHandler<commit_after>
|
import asyncio
import aiozmq
import aiozmq.rpc
class Handler(aiozmq.rpc.AttrHandler):
def __init__(self, ident):
self.ident = ident
self.subhandler = SubHandler(self.ident, 'subident')
@aiozmq.rpc.method
def a(self):
return (self.ident, 'a')
class SubHandler(aiozmq.rpc.AttrHandler):
def __init__(self, ident, subident):
self.ident = ident
self.subident = subident
@aiozmq.rpc.method
def b(self):
return (self.ident, self.subident, 'b')
@asyncio.coroutine
def go():
server = yield from aiozmq.rpc.start_server(
Handler('ident'), bind='tcp://*:*')
server_addr = next(iter(server.transport.bindings()))
client = yield from aiozmq.rpc.open_client(
connect=server_addr)
ret = yield from client.rpc.a()
assert ('ident', 'a') == ret
ret = yield from client.rpc.subhandler.b()
assert ('ident', 'subident', 'b') == ret
server.close()
client.close()
def main():
asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy())
asyncio.get_event_loop().run_until_complete(go())
print("DONE")
if __name__ == '__main__':
main()
|
Add example for subhandlers of AttrHandlerimport asyncio
import aiozmq
import aiozmq.rpc
class Handler(aiozmq.rpc.AttrHandler):
def __init__(self, ident):
self.ident = ident
self.subhandler = SubHandler(self.ident, 'subident')
@aiozmq.rpc.method
def a(self):
return (self.ident, 'a')
class SubHandler(aiozmq.rpc.AttrHandler):
def __init__(self, ident, subident):
self.ident = ident
self.subident = subident
@aiozmq.rpc.method
def b(self):
return (self.ident, self.subident, 'b')
@asyncio.coroutine
def go():
server = yield from aiozmq.rpc.start_server(
Handler('ident'), bind='tcp://*:*')
server_addr = next(iter(server.transport.bindings()))
client = yield from aiozmq.rpc.open_client(
connect=server_addr)
ret = yield from client.rpc.a()
assert ('ident', 'a') == ret
ret = yield from client.rpc.subhandler.b()
assert ('ident', 'subident', 'b') == ret
server.close()
client.close()
def main():
asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy())
asyncio.get_event_loop().run_until_complete(go())
print("DONE")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example for subhandlers of AttrHandler<commit_after>import asyncio
import aiozmq
import aiozmq.rpc
class Handler(aiozmq.rpc.AttrHandler):
def __init__(self, ident):
self.ident = ident
self.subhandler = SubHandler(self.ident, 'subident')
@aiozmq.rpc.method
def a(self):
return (self.ident, 'a')
class SubHandler(aiozmq.rpc.AttrHandler):
def __init__(self, ident, subident):
self.ident = ident
self.subident = subident
@aiozmq.rpc.method
def b(self):
return (self.ident, self.subident, 'b')
@asyncio.coroutine
def go():
server = yield from aiozmq.rpc.start_server(
Handler('ident'), bind='tcp://*:*')
server_addr = next(iter(server.transport.bindings()))
client = yield from aiozmq.rpc.open_client(
connect=server_addr)
ret = yield from client.rpc.a()
assert ('ident', 'a') == ret
ret = yield from client.rpc.subhandler.b()
assert ('ident', 'subident', 'b') == ret
server.close()
client.close()
def main():
asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy())
asyncio.get_event_loop().run_until_complete(go())
print("DONE")
if __name__ == '__main__':
main()
|
|
2dd10b10878de50911b6420226f90cb1eae2db5d
|
merge_docstrings.py
|
merge_docstrings.py
|
#!/usr/bin/env python
import lxml.etree as etree
import subprocess, optparse, sys, tempfile
def main():
p = optparse.OptionParser(usage="""%prog FILE1.xml FILE2.xml FILE3.xml
Output changes between FILE2.xml and FILE3.xml applied to FILE1.xml
""")
options, args = p.parse_args()
if len(args) != 3:
p.error("Wrong number of arguments")
tree1 = etree.parse(open(args[0], 'r'))
tree2 = etree.parse(open(args[1], 'r'))
tree3 = etree.parse(open(args[2], 'r'))
for el1 in tree1.getroot():
# look up the corresponding entries by id in the other two dumps
el2 = tree2.getroot().find("*[@id='%s']" % el1.attrib['id'])
el3 = tree3.getroot().find("*[@id='%s']" % el1.attrib['id'])
if el2 is None or el3 is None: continue
if el3.text == el2.text: continue
if el3.text == el1.text: continue
if el1.text is None: el1.text = ""
if el2.text is None: el2.text = ""
if el3.text is None: el3.text = ""
new_text, conflict = merge_3way(el1.text, el2.text, el3.text)
if conflict:
print >> sys.stderr, "CONFLICT", el1.attrib['id']
el1.text = new_text
def merge_3way(base, file1, file2):
"""
Perform a 3-way merge, inserting changes between base and file1 to file2.
Returns
-------
out : str
Resulting new file2, possibly with conflict markers
conflict : bool
Whether a conflict occurred in merge.
"""
f1 = tempfile.NamedTemporaryFile()
f2 = tempfile.NamedTemporaryFile()
f3 = tempfile.NamedTemporaryFile()
f1.write(file2)
f2.write(base)
f3.write(file1)
f1.flush()
f2.flush()
f3.flush()
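# GNU merge -p MINE OLDER YOURS prints MINE with the OLDER->YOURS changes folded in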
p = subprocess.Popen(['merge', '-p',
'-L', 'web version',
'-L', 'old svn version',
'-L', 'new svn version',
f1.name, f2.name, f3.name],
stdout=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
return out, True
return out, False
if __name__ == "__main__": main()
|
Add docstring 3-way merge tool
|
Add docstring 3-way merge tool
|
Python
|
bsd-3-clause
|
pv/pydocweb,pv/pydocweb
|
Add docstring 3-way merge tool
|
#!/usr/bin/env python
import lxml.etree as etree
import subprocess, optparse, sys, tempfile
def main():
p = optparse.OptionParser(usage="""%prog FILE1.xml FILE2.xml FILE3.xml
Output changes between FILE2.xml and FILE3.xml applied to FILE1.xml
""")
options, args = p.parse_args()
if len(args) != 3:
p.error("Wrong number of arguments")
tree1 = etree.parse(open(args[0], 'r'))
tree2 = etree.parse(open(args[1], 'r'))
tree3 = etree.parse(open(args[2], 'r'))
for el1 in tree1.getroot():
# look up the corresponding entries by id in the other two dumps
el2 = tree2.getroot().find("*[@id='%s']" % el1.attrib['id'])
el3 = tree3.getroot().find("*[@id='%s']" % el1.attrib['id'])
if el2 is None or el3 is None: continue
if el3.text == el2.text: continue
if el3.text == el1.text: continue
if el1.text is None: el1.text = ""
if el2.text is None: el2.text = ""
if el3.text is None: el3.text = ""
new_text, conflict = merge_3way(el1.text, el2.text, el3.text)
if conflict:
print >> sys.stderr, "CONFLICT", el1.attrib['id']
el1.text = new_text
def merge_3way(base, file1, file2):
"""
Perform a 3-way merge, inserting changes between base and file1 to file2.
Returns
-------
out : str
Resulting new file2, possibly with conflict markers
conflict : bool
Whether a conflict occurred in merge.
"""
f1 = tempfile.NamedTemporaryFile()
f2 = tempfile.NamedTemporaryFile()
f3 = tempfile.NamedTemporaryFile()
f1.write(file2)
f2.write(base)
f3.write(file1)
f1.flush()
f2.flush()
f3.flush()
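# GNU merge -p MINE OLDER YOURS prints MINE with the OLDER->YOURS changes folded in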
p = subprocess.Popen(['merge', '-p',
'-L', 'web version',
'-L', 'old svn version',
'-L', 'new svn version',
f1.name, f2.name, f3.name],
stdout=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
return out, True
return out, False
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add docstring 3-way merge tool<commit_after>
|
#!/usr/bin/env python
import lxml.etree as etree
import subprocess, optparse, sys, tempfile
def main():
p = optparse.OptionParser(usage="""%prog FILE1.xml FILE2.xml FILE3.xml
Output changes between FILE2.xml and FILE3.xml applied to FILE1.xml
""")
options, args = p.parse_args()
if len(args) != 3:
p.error("Wrong number of arguments")
tree1 = etree.parse(open(args[0], 'r'))
tree2 = etree.parse(open(args[1], 'r'))
tree3 = etree.parse(open(args[2], 'r'))
for el1 in tree1.getroot():
# look up the corresponding entries by id in the other two dumps
el2 = tree2.getroot().find("*[@id='%s']" % el1.attrib['id'])
el3 = tree3.getroot().find("*[@id='%s']" % el1.attrib['id'])
if el2 is None or el3 is None: continue
if el3.text == el2.text: continue
if el3.text == el1.text: continue
if el1.text is None: el1.text = ""
if el2.text is None: el2.text = ""
if el3.text is None: el3.text = ""
new_text, conflict = merge_3way(el1.text, el2.text, el3.text)
if conflict:
print >> sys.stderr, "CONFLICT", el1.attrib['id']
el1.text = new_text
def merge_3way(base, file1, file2):
"""
Perform a 3-way merge, inserting changes between base and file1 to file2.
Returns
-------
out : str
Resulting new file2, possibly with conflict markers
conflict : bool
Whether a conflict occurred in merge.
"""
f1 = tempfile.NamedTemporaryFile()
f2 = tempfile.NamedTemporaryFile()
f3 = tempfile.NamedTemporaryFile()
f1.write(file2)
f2.write(base)
f3.write(file1)
f1.flush()
f2.flush()
f3.flush()
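# GNU merge -p MINE OLDER YOURS prints MINE with the OLDER->YOURS changes folded in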
p = subprocess.Popen(['merge', '-p',
'-L', 'web version',
'-L', 'old svn version',
'-L', 'new svn version',
f1.name, f2.name, f3.name],
stdout=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
return out, True
return out, False
if __name__ == "__main__": main()
|
Add docstring 3-way merge tool#!/usr/bin/env python
import lxml.etree as etree
import subprocess, optparse, sys, tempfile
def main():
p = optparse.OptionParser(usage="""%prog FILE1.xml FILE2.xml FILE3.xml
Output changes between FILE2.xml and FILE3.xml applied to FILE1.xml
""")
options, args = p.parse_args()
if len(args) != 3:
p.error("Wrong number of arguments")
tree1 = etree.parse(open(args[0], 'r'))
tree2 = etree.parse(open(args[1], 'r'))
tree3 = etree.parse(open(args[2], 'r'))
for el1 in tree1.getroot():
# look up the corresponding entries by id in the other two dumps
el2 = tree2.getroot().find("*[@id='%s']" % el1.attrib['id'])
el3 = tree3.getroot().find("*[@id='%s']" % el1.attrib['id'])
if el2 is None or el3 is None: continue
if el3.text == el2.text: continue
if el3.text == el1.text: continue
if el1.text is None: el1.text = ""
if el2.text is None: el2.text = ""
if el3.text is None: el3.text = ""
new_text, conflict = merge_3way(el1.text, el2.text, el3.text)
if conflict:
print >> sys.stderr, "CONFLICT", el1.attrib['id']
el1.text = new_text
def merge_3way(base, file1, file2):
"""
Perform a 3-way merge, inserting changes between base and file1 to file2.
Returns
-------
out : str
Resulting new file2, possibly with conflict markers
conflict : bool
Whether a conflict occurred in merge.
"""
f1 = tempfile.NamedTemporaryFile()
f2 = tempfile.NamedTemporaryFile()
f3 = tempfile.NamedTemporaryFile()
f1.write(file2)
f2.write(base)
f3.write(file1)
f1.flush()
f2.flush()
f3.flush()
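# GNU merge -p MINE OLDER YOURS prints MINE with the OLDER->YOURS changes folded in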
p = subprocess.Popen(['merge', '-p',
'-L', 'web version',
'-L', 'old svn version',
'-L', 'new svn version',
f1.name, f2.name, f3.name],
stdout=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
return out, True
return out, False
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add docstring 3-way merge tool<commit_after>#!/usr/bin/env python
import lxml.etree as etree
import subprocess, optparse, sys, tempfile
def main():
p = optparse.OptionParser(usage="""%prog FILE1.xml FILE2.xml FILE3.xml
Output changes between FILE2.xml and FILE3.xml applied to FILE1.xml
""")
options, args = p.parse_args()
if len(args) != 3:
p.error("Wrong number of arguments")
tree1 = etree.parse(open(args[0], 'r'))
tree2 = etree.parse(open(args[1], 'r'))
tree3 = etree.parse(open(args[2], 'r'))
for el1 in tree1.getroot():
# look up the corresponding entries by id in the other two dumps
el2 = tree2.getroot().find("*[@id='%s']" % el1.attrib['id'])
el3 = tree3.getroot().find("*[@id='%s']" % el1.attrib['id'])
if el2 is None or el3 is None: continue
if el3.text == el2.text: continue
if el3.text == el1.text: continue
if el1.text is None: el1.text = ""
if el2.text is None: el2.text = ""
if el3.text is None: el3.text = ""
new_text, conflict = merge_3way(el1.text, el2.text, el3.text)
if conflict:
print >> sys.stderr, "CONFLICT", el1.attrib['id']
el1.text = new_text
def merge_3way(base, file1, file2):
"""
Perform a 3-way merge, inserting changes between base and file1 to file2.
Returns
-------
out : str
Resulting new file2, possibly with conflict markers
conflict : bool
Whether a conflict occurred in merge.
"""
f1 = tempfile.NamedTemporaryFile()
f2 = tempfile.NamedTemporaryFile()
f3 = tempfile.NamedTemporaryFile()
f1.write(file2)
f2.write(base)
f3.write(file1)
f1.flush()
f2.flush()
f3.flush()
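# GNU merge -p MINE OLDER YOURS prints MINE with the OLDER->YOURS changes folded in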
p = subprocess.Popen(['merge', '-p',
'-L', 'web version',
'-L', 'old svn version',
'-L', 'new svn version',
f1.name, f2.name, f3.name],
stdout=subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
return out, True
return out, False
if __name__ == "__main__": main()
|
|
0db1d34f3dceb015474f8a22a2cc52e6fdd92b76
|
bluebottle/deeds/migrations/0011_auto_20211209_1640.py
|
bluebottle/deeds/migrations/0011_auto_20211209_1640.py
|
# Generated by Django 2.2.24 on 2021-12-09 15:40
from django.db import migrations
def fix_participant_dates(apps, schema_editor):
EffortContribution = apps.get_model('activities', 'EffortContribution')
EffortContribution.objects.update(end=None)
class Migration(migrations.Migration):
dependencies = [
('deeds', '0010_auto_20211208_0833'),
]
operations = [
migrations.RunPython(
fix_participant_dates,
migrations.RunPython.noop
)
]
|
Remove end date from deed contributions
|
Remove end date from deed contributions
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Remove end date from deed contributions
|
# Generated by Django 2.2.24 on 2021-12-09 15:40
from django.db import migrations
def fix_participant_dates(apps, schema_editor):
EffortContribution = apps.get_model('activities', 'EffortContribution')
EffortContribution.objects.update(end=None)
class Migration(migrations.Migration):
dependencies = [
('deeds', '0010_auto_20211208_0833'),
]
operations = [
migrations.RunPython(
fix_participant_dates,
migrations.RunPython.noop
)
]
|
<commit_before><commit_msg>Remove end date from deed contributions<commit_after>
|
# Generated by Django 2.2.24 on 2021-12-09 15:40
from django.db import migrations
def fix_participant_dates(apps, schema_editor):
EffortContribution = apps.get_model('activities', 'EffortContribution')
EffortContribution.objects.update(end=None)
class Migration(migrations.Migration):
dependencies = [
('deeds', '0010_auto_20211208_0833'),
]
operations = [
migrations.RunPython(
fix_participant_dates,
migrations.RunPython.noop
)
]
|
Remove end date from deed contributions# Generated by Django 2.2.24 on 2021-12-09 15:40
from django.db import migrations
def fix_participant_dates(apps, schema_editor):
EffortContribution = apps.get_model('activities', 'EffortContribution')
EffortContribution.objects.update(end=None)
class Migration(migrations.Migration):
dependencies = [
('deeds', '0010_auto_20211208_0833'),
]
operations = [
migrations.RunPython(
fix_participant_dates,
migrations.RunPython.noop
)
]
|
<commit_before><commit_msg>Remove end date from deed contributions<commit_after># Generated by Django 2.2.24 on 2021-12-09 15:40
from django.db import migrations
def fix_participant_dates(apps, schema_editor):
EffortContribution = apps.get_model('activities', 'EffortContribution')
EffortContribution.objects.update(end=None)
class Migration(migrations.Migration):
dependencies = [
('deeds', '0010_auto_20211208_0833'),
]
operations = [
migrations.RunPython(
fix_participant_dates,
migrations.RunPython.noop
)
]
|
|
6e3d49bf270d240290c8c4b11b7242f8b11d3de3
|
simplemooc/courses/migrations/0002_auto_20160504_2321.py
|
simplemooc/courses/migrations/0002_auto_20160504_2321.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('courses', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='lesson',
name='name',
field=models.CharField(verbose_name='informar conteúdo', max_length=100),
),
migrations.AlterField(
model_name='lessontrb',
name='name',
field=models.CharField(verbose_name='informar conteúdo', max_length=100),
),
]
|
Create migration file to change of verbose_name of Lessons models
|
Create migration file to change of verbose_name of Lessons models
|
Python
|
mit
|
mazulo/simplemooc,mazulo/simplemooc
|
Create migration file to change of verbose_name of Lessons models
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('courses', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='lesson',
name='name',
field=models.CharField(verbose_name='informar conteúdo', max_length=100),
),
migrations.AlterField(
model_name='lessontrb',
name='name',
field=models.CharField(verbose_name='informar conteúdo', max_length=100),
),
]
|
<commit_before><commit_msg>Create migration file to change of verbose_name of Lessons models<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('courses', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='lesson',
name='name',
field=models.CharField(verbose_name='informar conteúdo', max_length=100),
),
migrations.AlterField(
model_name='lessontrb',
name='name',
field=models.CharField(verbose_name='informar conteúdo', max_length=100),
),
]
|
Create migration file to change of verbose_name of Lessons models# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('courses', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='lesson',
name='name',
field=models.CharField(verbose_name='informar conteúdo', max_length=100),
),
migrations.AlterField(
model_name='lessontrb',
name='name',
field=models.CharField(verbose_name='informar conteúdo', max_length=100),
),
]
|
<commit_before><commit_msg>Create migration file to change of verbose_name of Lessons models<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('courses', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='lesson',
name='name',
field=models.CharField(verbose_name='informar conteúdo', max_length=100),
),
migrations.AlterField(
model_name='lessontrb',
name='name',
field=models.CharField(verbose_name='informar conteúdo', max_length=100),
),
]
|
|
982a57cc37d611519a40eed1afd6e713dae096fc
|
beer_search_v2/management/commands/batch_announce.py
|
beer_search_v2/management/commands/batch_announce.py
|
from beer_search_v2.models import ProductType
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
"""
Prints out a markdown-formatted list of products that can be posted as a whole
"""
product_types = ProductType.objects.filter(needs_announcement=True)
if product_types:
for p in product_types.all():
print("[{}](http://bjorleit.info{})\n".format(p.alias, p.get_absolute_url()))
p.needs_announcement = False
p.save()
print("{} vörur alls".format(product_types.count()))
else:
print("No product types are in need of announcement.")
|
Make a batch announcement script for generating gists
|
Make a batch announcement script for generating gists
|
Python
|
mit
|
Ernir/bjorleitin,Ernir/bjorleitin,Ernir/bjorleitin,Ernir/bjorleitin
|
Make a batch announcement script for generating gists
|
from beer_search_v2.models import ProductType
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
"""
Prints out a markdown-formatted list of products that can be posted as a whole
"""
product_types = ProductType.objects.filter(needs_announcement=True)
if product_types:
for p in product_types.all():
print("[{}](http://bjorleit.info{})\n".format(p.alias, p.get_absolute_url()))
p.needs_announcement = False
p.save()
print("{} vörur alls".format(product_types.count()))
else:
print("No product types are in need of announcement.")
|
<commit_before><commit_msg>Make a batch announcement script for generating gists<commit_after>
|
from beer_search_v2.models import ProductType
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
"""
Prints out a markdown-formatted list of products that can be posted as a whole
"""
product_types = ProductType.objects.filter(needs_announcement=True)
if product_types:
for p in product_types.all():
print("[{}](http://bjorleit.info{})\n".format(p.alias, p.get_absolute_url()))
p.needs_announcement = False
p.save()
print("{} vörur alls".format(product_types.count()))
else:
print("No product types are in need of announcement.")
|
Make a batch announcement script for generating gistsfrom beer_search_v2.models import ProductType
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
"""
Prints out a markdown-formatted list of products that can be posted as a whole
"""
product_types = ProductType.objects.filter(needs_announcement=True)
if product_types:
for p in product_types.all():
print("[{}](http://bjorleit.info{})\n".format(p.alias, p.get_absolute_url()))
p.needs_announcement = False
p.save()
print("{} vörur alls".format(product_types.count()))
else:
print("No product types are in need of announcement.")
|
<commit_before><commit_msg>Make a batch announcement script for generating gists<commit_after>from beer_search_v2.models import ProductType
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
"""
Prints out a markdown-formatted list of products that can be posted as a whole
"""
product_types = ProductType.objects.filter(needs_announcement=True)
if product_types:
for p in product_types.all():
print("[{}](http://bjorleit.info{})\n".format(p.alias, p.get_absolute_url()))
p.needs_announcement = False
p.save()
print("{} vörur alls".format(product_types.count()))
else:
print("No product types are in need of announcement.")
|
|
8f75f1d02d44b0bc216b93ef163ac2f4e877e9ce
|
test/main.py
|
test/main.py
|
#! /usr/bin/env python
#
# Copyright (c) 2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU Lesser General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (LGPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of LGPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt.
#
# Jeff Ortel <jortel@redhat.com>
#
"""
Provides a debugging agent that loads configuration and plugins
from standard locations. But, does not need to run as root
since it does not use /var directories.
IMPORTANT: be sure the installed daemon is stopped.
"""
import os
import time
from gofer.agent import logutil
class TestAgent:
ROOT = '/tmp/gofer'
def start(self):
self.mkdir()
logutil.LOGDIR = self.ROOT
from gofer.agent.main import PluginLoader, Agent, AgentLock, eager
from gofer.rmi.store import PendingQueue
AgentLock.PATH = os.path.join(self.ROOT, 'gofer.pid')
PendingQueue.ROOT = os.path.join(self.ROOT, 'messaging/pending')
self.mkdir(PendingQueue.ROOT)
pl = PluginLoader()
plugins = pl.load(eager())
agent = Agent(plugins)
agent.start(False)
print 'Agent: started'
while True:
time.sleep(10)
print 'Agent: sleeping...'
def mkdir(self, path=ROOT):
if not os.path.exists(path):
os.makedirs(path)
return path
if __name__ == '__main__':
agent = TestAgent()
agent.start()
print 'Done'
|
Add for debugging w/o running as root.
|
Add for debugging w/o running as root.
|
Python
|
lgpl-2.1
|
jortel/gofer,credativ/gofer,credativ/gofer,kgiusti/gofer,kgiusti/gofer,jortel/gofer,splice/gofer,splice/gofer,splice/gofer
|
Add for debugging w/o running as root.
|
#! /usr/bin/env python
#
# Copyright (c) 2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU Lesser General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (LGPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of LGPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt.
#
# Jeff Ortel <jortel@redhat.com>
#
"""
Provides a debugging agent that loads configuration and plugins
from standard locations. But, does not need to run as root
since it does not use /var directories.
IMPORTANT: be sure the installed daemon is stopped.
"""
import os
import time
from gofer.agent import logutil
class TestAgent:
ROOT = '/tmp/gofer'
def start(self):
self.mkdir()
logutil.LOGDIR = self.ROOT
from gofer.agent.main import PluginLoader, Agent, AgentLock, eager
from gofer.rmi.store import PendingQueue
AgentLock.PATH = os.path.join(self.ROOT, 'gofer.pid')
PendingQueue.ROOT = os.path.join(self.ROOT, 'messaging/pending')
self.mkdir(PendingQueue.ROOT)
pl = PluginLoader()
plugins = pl.load(eager())
agent = Agent(plugins)
agent.start(False)
print 'Agent: started'
while True:
time.sleep(10)
print 'Agent: sleeping...'
def mkdir(self, path=ROOT):
if not os.path.exists(path):
os.makedirs(path)
return path
if __name__ == '__main__':
agent = TestAgent()
agent.start()
print 'Done'
|
<commit_before><commit_msg>Add for debugging w/o running as root.<commit_after>
|
#! /usr/bin/env python
#
# Copyright (c) 2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU Lesser General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (LGPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of LGPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt.
#
# Jeff Ortel <jortel@redhat.com>
#
"""
Provides a debugging agent that loads configuration and plugins
from standard locations. But, does not need to run as root
since it does not use /var directories.
IMPORTANT: be sure the installed daemon is stopped.
"""
import os
import time
from gofer.agent import logutil
class TestAgent:
ROOT = '/tmp/gofer'
def start(self):
self.mkdir()
logutil.LOGDIR = self.ROOT
from gofer.agent.main import PluginLoader, Agent, AgentLock, eager
from gofer.rmi.store import PendingQueue
AgentLock.PATH = os.path.join(self.ROOT, 'gofer.pid')
PendingQueue.ROOT = os.path.join(self.ROOT, 'messaging/pending')
self.mkdir(PendingQueue.ROOT)
pl = PluginLoader()
plugins = pl.load(eager())
agent = Agent(plugins)
agent.start(False)
print 'Agent: started'
while True:
time.sleep(10)
print 'Agent: sleeping...'
def mkdir(self, path=ROOT):
if not os.path.exists(path):
os.makedirs(path)
return path
if __name__ == '__main__':
agent = TestAgent()
agent.start()
print 'Done'
|
Add for debugging w/o running as root.#! /usr/bin/env python
#
# Copyright (c) 2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU Lesser General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (LGPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of LGPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt.
#
# Jeff Ortel <jortel@redhat.com>
#
"""
Provides a debugging agent that loads configuration and plugins
from standard locations. But, does not need to run as root
since it does use /var directories.
IMPORTANT: be sure the installed daemon is stopped.
"""
import os
import time
from gofer.agent import logutil
class TestAgent:
ROOT = '/tmp/gofer'
def start(self):
self.mkdir()
logutil.LOGDIR = self.ROOT
from gofer.agent.main import PluginLoader, Agent, AgentLock, eager
from gofer.rmi.store import PendingQueue
AgentLock.PATH = os.path.join(self.ROOT, 'gofer.pid')
PendingQueue.ROOT = os.path.join(self.ROOT, 'messaging/pending')
self.mkdir(PendingQueue.ROOT)
pl = PluginLoader()
plugins = pl.load(eager())
agent = Agent(plugins)
agent.start(False)
print 'Agent: started'
while True:
time.sleep(10)
print 'Agent: sleeping...'
def mkdir(self, path=ROOT):
if not os.path.exists(path):
os.makedirs(path)
return path
if __name__ == '__main__':
agent = TestAgent()
agent.start()
print 'Done'
|
<commit_before><commit_msg>Add for debugging w/o running as root.<commit_after>#! /usr/bin/env python
#
# Copyright (c) 2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU Lesser General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (LGPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of LGPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt.
#
# Jeff Ortel <jortel@redhat.com>
#
"""
Provides a debugging agent that loads configuration and plugins
from standard locations. But, does not need to run as root
since it does use /var directories.
IMPORTANT: be sure the installed daemon is stopped.
"""
import os
import time
from gofer.agent import logutil
class TestAgent:
ROOT = '/tmp/gofer'
def start(self):
self.mkdir()
logutil.LOGDIR = self.ROOT
from gofer.agent.main import PluginLoader, Agent, AgentLock, eager
from gofer.rmi.store import PendingQueue
AgentLock.PATH = os.path.join(self.ROOT, 'gofer.pid')
PendingQueue.ROOT = os.path.join(self.ROOT, 'messaging/pending')
self.mkdir(PendingQueue.ROOT)
pl = PluginLoader()
plugins = pl.load(eager())
agent = Agent(plugins)
agent.start(False)
print 'Agent: started'
while True:
time.sleep(10)
print 'Agent: sleeping...'
def mkdir(self, path=ROOT):
if not os.path.exists(path):
os.makedirs(path)
return path
if __name__ == '__main__':
agent = TestAgent()
agent.start()
print 'Done'
|
|
11602e6729a9549aa334f776a9920e79ff2bab1a
|
pinax/teams/hooks.py
|
pinax/teams/hooks.py
|
from django.db.models import Q
from django.core.urlresolvers import reverse
class TeamDefaultHookset(object):
def build_team_url(self, url_name, team_slug):
return reverse(url_name, args=[team_slug])
def get_autocomplete_result(self, user):
return {"pk": user.pk, "email": user.email, "name": user.get_full_name()}
def search_queryset(self, query, users):
return users.filter(
Q(email__icontains=query) |
Q(username__icontains=query) |
Q(first_name__icontains=query) |
Q(last_name__icontains=query)
)
class HookProxy(object):
def __getattr__(self, attr):
from teams.conf import settings
return getattr(settings.TEAMS_HOOKSET, attr)
hookset = HookProxy()
|
from django.db.models import Q
from django.core.urlresolvers import reverse
class TeamDefaultHookset(object):
def build_team_url(self, url_name, team_slug):
return reverse(url_name, args=[team_slug])
def get_autocomplete_result(self, user):
return {"pk": user.pk, "email": user.email, "name": user.get_full_name()}
def search_queryset(self, query, users):
return users.filter(
Q(email__icontains=query) |
Q(username__icontains=query) |
Q(first_name__icontains=query) |
Q(last_name__icontains=query)
)
class HookProxy(object):
def __getattr__(self, attr):
from pinax.teams.conf import settings
return getattr(settings.TEAMS_HOOKSET, attr)
hookset = HookProxy()
|
Fix bug left over from the namespacing
|
Fix bug left over from the namespacing
|
Python
|
mit
|
rizumu/pinax-teams,jacobwegner/pinax-teams,pinax/pinax-teams,miurahr/pinax-teams,pinax/pinax-teams
|
from django.db.models import Q
from django.core.urlresolvers import reverse
class TeamDefaultHookset(object):
def build_team_url(self, url_name, team_slug):
return reverse(url_name, args=[team_slug])
def get_autocomplete_result(self, user):
return {"pk": user.pk, "email": user.email, "name": user.get_full_name()}
def search_queryset(self, query, users):
return users.filter(
Q(email__icontains=query) |
Q(username__icontains=query) |
Q(first_name__icontains=query) |
Q(last_name__icontains=query)
)
class HookProxy(object):
def __getattr__(self, attr):
from teams.conf import settings
return getattr(settings.TEAMS_HOOKSET, attr)
hookset = HookProxy()
Fix bug left over from the namespacing
|
from django.db.models import Q
from django.core.urlresolvers import reverse
class TeamDefaultHookset(object):
def build_team_url(self, url_name, team_slug):
return reverse(url_name, args=[team_slug])
def get_autocomplete_result(self, user):
return {"pk": user.pk, "email": user.email, "name": user.get_full_name()}
def search_queryset(self, query, users):
return users.filter(
Q(email__icontains=query) |
Q(username__icontains=query) |
Q(first_name__icontains=query) |
Q(last_name__icontains=query)
)
class HookProxy(object):
def __getattr__(self, attr):
from pinax.teams.conf import settings
return getattr(settings.TEAMS_HOOKSET, attr)
hookset = HookProxy()
|
<commit_before>from django.db.models import Q
from django.core.urlresolvers import reverse
class TeamDefaultHookset(object):
def build_team_url(self, url_name, team_slug):
return reverse(url_name, args=[team_slug])
def get_autocomplete_result(self, user):
return {"pk": user.pk, "email": user.email, "name": user.get_full_name()}
def search_queryset(self, query, users):
return users.filter(
Q(email__icontains=query) |
Q(username__icontains=query) |
Q(first_name__icontains=query) |
Q(last_name__icontains=query)
)
class HookProxy(object):
def __getattr__(self, attr):
from teams.conf import settings
return getattr(settings.TEAMS_HOOKSET, attr)
hookset = HookProxy()
<commit_msg>Fix bug left over from the namespacing<commit_after>
|
from django.db.models import Q
from django.core.urlresolvers import reverse
class TeamDefaultHookset(object):
def build_team_url(self, url_name, team_slug):
return reverse(url_name, args=[team_slug])
def get_autocomplete_result(self, user):
return {"pk": user.pk, "email": user.email, "name": user.get_full_name()}
def search_queryset(self, query, users):
return users.filter(
Q(email__icontains=query) |
Q(username__icontains=query) |
Q(first_name__icontains=query) |
Q(last_name__icontains=query)
)
class HookProxy(object):
def __getattr__(self, attr):
from pinax.teams.conf import settings
return getattr(settings.TEAMS_HOOKSET, attr)
hookset = HookProxy()
|
from django.db.models import Q
from django.core.urlresolvers import reverse
class TeamDefaultHookset(object):
def build_team_url(self, url_name, team_slug):
return reverse(url_name, args=[team_slug])
def get_autocomplete_result(self, user):
return {"pk": user.pk, "email": user.email, "name": user.get_full_name()}
def search_queryset(self, query, users):
return users.filter(
Q(email__icontains=query) |
Q(username__icontains=query) |
Q(first_name__icontains=query) |
Q(last_name__icontains=query)
)
class HookProxy(object):
def __getattr__(self, attr):
from teams.conf import settings
return getattr(settings.TEAMS_HOOKSET, attr)
hookset = HookProxy()
Fix bug left over from the namespacingfrom django.db.models import Q
from django.core.urlresolvers import reverse
class TeamDefaultHookset(object):
def build_team_url(self, url_name, team_slug):
return reverse(url_name, args=[team_slug])
def get_autocomplete_result(self, user):
return {"pk": user.pk, "email": user.email, "name": user.get_full_name()}
def search_queryset(self, query, users):
return users.filter(
Q(email__icontains=query) |
Q(username__icontains=query) |
Q(first_name__icontains=query) |
Q(last_name__icontains=query)
)
class HookProxy(object):
def __getattr__(self, attr):
from pinax.teams.conf import settings
return getattr(settings.TEAMS_HOOKSET, attr)
hookset = HookProxy()
|
<commit_before>from django.db.models import Q
from django.core.urlresolvers import reverse
class TeamDefaultHookset(object):
def build_team_url(self, url_name, team_slug):
return reverse(url_name, args=[team_slug])
def get_autocomplete_result(self, user):
return {"pk": user.pk, "email": user.email, "name": user.get_full_name()}
def search_queryset(self, query, users):
return users.filter(
Q(email__icontains=query) |
Q(username__icontains=query) |
Q(first_name__icontains=query) |
Q(last_name__icontains=query)
)
class HookProxy(object):
def __getattr__(self, attr):
from teams.conf import settings
return getattr(settings.TEAMS_HOOKSET, attr)
hookset = HookProxy()
<commit_msg>Fix bug left over from the namespacing<commit_after>from django.db.models import Q
from django.core.urlresolvers import reverse
class TeamDefaultHookset(object):
def build_team_url(self, url_name, team_slug):
return reverse(url_name, args=[team_slug])
def get_autocomplete_result(self, user):
return {"pk": user.pk, "email": user.email, "name": user.get_full_name()}
def search_queryset(self, query, users):
return users.filter(
Q(email__icontains=query) |
Q(username__icontains=query) |
Q(first_name__icontains=query) |
Q(last_name__icontains=query)
)
class HookProxy(object):
def __getattr__(self, attr):
from pinax.teams.conf import settings
return getattr(settings.TEAMS_HOOKSET, attr)
hookset = HookProxy()
|
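The HookProxy in the record above is a small lazy-proxy pattern: attribute access is forwarded to an object that is only resolved when the attribute is touched, which keeps import order and configuration decoupled. A self-contained sketch of the same idea, with invented names and no Django dependency, might look like:
class DefaultHooks:
    def greet(self, name):
        return "hello {}".format(name)

# Stand-in for the configured hookset; in the real app this would come from settings.
ACTIVE_HOOKSET = DefaultHooks()

class LazyHookProxy:
    def __getattr__(self, attr):
        # Resolved on every access, so late rebinding of ACTIVE_HOOKSET is picked up.
        return getattr(ACTIVE_HOOKSET, attr)

hookset = LazyHookProxy()
print(hookset.greet("team"))  # -> hello team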
27d29f2f30be7dd899fae08b09d181a22be8d56a
|
python/ctci_queue_using_two_stacks.py
|
python/ctci_queue_using_two_stacks.py
|
class MyQueue(object):
def __init__(self):
self.stack_1 = []
self.stack_2 = []
def peek(self):
self.migrate_stacks_if_necessary()
return self.stack_2[-1]
def pop(self):
self.migrate_stacks_if_necessary()
return self.stack_2.pop()
def put(self, value):
self.stack_1.append(value)
def migrate_stacks_if_necessary(self):
if len(self.stack_2) == 0:
self.migrate_stacks()
def migrate_stacks(self):
while len(self.stack_1) != 0:
self.stack_2.append(self.stack_1.pop())
queue = MyQueue()
t = int(input())
for line in range(t):
values = map(int, input().split())
values = list(values)
if values[0] == 1:
queue.put(values[1])
elif values[0] == 2:
queue.pop()
else:
print(queue.peek())
|
Solve queue using two stacks
|
Solve queue using two stacks
|
Python
|
mit
|
rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank
|
Solve queue using two stacks
|
class MyQueue(object):
def __init__(self):
self.stack_1 = []
self.stack_2 = []
def peek(self):
self.migrate_stacks_if_necessary()
return self.stack_2[-1]
def pop(self):
self.migrate_stacks_if_necessary()
return self.stack_2.pop()
def put(self, value):
self.stack_1.append(value)
def migrate_stacks_if_necessary(self):
if len(self.stack_2) == 0:
self.migrate_stacks()
def migrate_stacks(self):
while len(self.stack_1) != 0:
self.stack_2.append(self.stack_1.pop())
queue = MyQueue()
t = int(input())
for line in range(t):
values = map(int, input().split())
values = list(values)
if values[0] == 1:
queue.put(values[1])
elif values[0] == 2:
queue.pop()
else:
print(queue.peek())
|
<commit_before><commit_msg>Solve queue using two stacks<commit_after>
|
class MyQueue(object):
def __init__(self):
self.stack_1 = []
self.stack_2 = []
def peek(self):
self.migrate_stacks_if_necessary()
return self.stack_2[-1]
def pop(self):
self.migrate_stacks_if_necessary()
return self.stack_2.pop()
def put(self, value):
self.stack_1.append(value)
def migrate_stacks_if_necessary(self):
if len(self.stack_2) == 0:
self.migrate_stacks()
def migrate_stacks(self):
while len(self.stack_1) != 0:
self.stack_2.append(self.stack_1.pop())
queue = MyQueue()
t = int(input())
for line in range(t):
values = map(int, input().split())
values = list(values)
if values[0] == 1:
queue.put(values[1])
elif values[0] == 2:
queue.pop()
else:
print(queue.peek())
|
Solve queue using two stacksclass MyQueue(object):
def __init__(self):
self.stack_1 = []
self.stack_2 = []
def peek(self):
self.migrate_stacks_if_necessary()
return self.stack_2[-1]
def pop(self):
self.migrate_stacks_if_necessary()
return self.stack_2.pop()
def put(self, value):
self.stack_1.append(value)
def migrate_stacks_if_necessary(self):
if len(self.stack_2) == 0:
self.migrate_stacks()
def migrate_stacks(self):
while len(self.stack_1) != 0:
self.stack_2.append(self.stack_1.pop())
queue = MyQueue()
t = int(input())
for line in range(t):
values = map(int, input().split())
values = list(values)
if values[0] == 1:
queue.put(values[1])
elif values[0] == 2:
queue.pop()
else:
print(queue.peek())
|
<commit_before><commit_msg>Solve queue using two stacks<commit_after>class MyQueue(object):
def __init__(self):
self.stack_1 = []
self.stack_2 = []
def peek(self):
self.migrate_stacks_if_necessary()
return self.stack_2[-1]
def pop(self):
self.migrate_stacks_if_necessary()
return self.stack_2.pop()
def put(self, value):
self.stack_1.append(value)
def migrate_stacks_if_necessary(self):
if len(self.stack_2) == 0:
self.migrate_stacks()
def migrate_stacks(self):
while len(self.stack_1) != 0:
self.stack_2.append(self.stack_1.pop())
queue = MyQueue()
t = int(input())
for line in range(t):
values = map(int, input().split())
values = list(values)
if values[0] == 1:
queue.put(values[1])
elif values[0] == 2:
queue.pop()
else:
print(queue.peek())
|
|
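The two-stack queue above gives FIFO behaviour with amortized O(1) operations, because each element is moved from the input stack to the output stack at most once. A stand-alone sketch (Python 3, re-declared here under a hypothetical name) demonstrating the ordering:
class TwoStackQueue:
    def __init__(self):
        self._in = []    # receives new elements
        self._out = []   # serves elements in FIFO order

    def put(self, value):
        self._in.append(value)

    def _migrate(self):
        # Move elements only when the outgoing stack is empty,
        # so each element is transferred at most once overall.
        if not self._out:
            while self._in:
                self._out.append(self._in.pop())

    def peek(self):
        self._migrate()
        return self._out[-1]

    def pop(self):
        self._migrate()
        return self._out.pop()

q = TwoStackQueue()
for n in (1, 2, 3):
    q.put(n)
assert q.peek() == 1                              # front of the queue, not the last push
assert [q.pop(), q.pop(), q.pop()] == [1, 2, 3]   # FIFO order preserved
print("FIFO order preserved")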
534dece8a904868aafe17f521535468c386700aa
|
tests/sentry/web/frontend/test_organization_member_settings.py
|
tests/sentry/web/frontend/test_organization_member_settings.py
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import OrganizationMember, OrganizationMemberType
from sentry.testutils import TestCase
class OrganizationMemberSettingsTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization(name='foo', owner=self.user)
team_1 = self.create_team(name='foo', organization=organization)
team_2 = self.create_team(name='bar', organization=organization)
user = self.create_user('bar@example.com')
member = OrganizationMember.objects.create(
organization=organization,
user=user,
type=OrganizationMemberType.MEMBER,
has_global_access=False,
)
member.teams.add(team_2)
path = reverse('sentry-organization-member-settings', args=[organization.id, member.id])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/organization-member-settings.html')
assert resp.context['organization'] == organization
assert resp.context['member'] == member
assert resp.context['form']
|
Add test for member settings display
|
Add test for member settings display
|
Python
|
bsd-3-clause
|
BuildingLink/sentry,zenefits/sentry,kevinastone/sentry,drcapulet/sentry,wujuguang/sentry,camilonova/sentry,llonchj/sentry,fotinakis/sentry,kevinastone/sentry,JackDanger/sentry,BuildingLink/sentry,mvaled/sentry,drcapulet/sentry,Kryz/sentry,jean/sentry,hongliang5623/sentry,ngonzalvez/sentry,JackDanger/sentry,mvaled/sentry,BuildingLink/sentry,ifduyue/sentry,kevinlondon/sentry,alexm92/sentry,llonchj/sentry,mvaled/sentry,ifduyue/sentry,hongliang5623/sentry,JackDanger/sentry,songyi199111/sentry,beeftornado/sentry,mitsuhiko/sentry,gencer/sentry,looker/sentry,JTCunning/sentry,ngonzalvez/sentry,vperron/sentry,ifduyue/sentry,mvaled/sentry,wujuguang/sentry,fotinakis/sentry,TedaLIEz/sentry,kevinlondon/sentry,gencer/sentry,alexm92/sentry,BayanGroup/sentry,wong2/sentry,gencer/sentry,kevinastone/sentry,wujuguang/sentry,Natim/sentry,fotinakis/sentry,beeftornado/sentry,BayanGroup/sentry,songyi199111/sentry,JamesMura/sentry,mitsuhiko/sentry,korealerts1/sentry,daevaorn/sentry,ngonzalvez/sentry,fotinakis/sentry,boneyao/sentry,jean/sentry,JTCunning/sentry,zenefits/sentry,gg7/sentry,BuildingLink/sentry,TedaLIEz/sentry,jean/sentry,ewdurbin/sentry,ifduyue/sentry,drcapulet/sentry,beeftornado/sentry,boneyao/sentry,BuildingLink/sentry,JamesMura/sentry,felixbuenemann/sentry,imankulov/sentry,jokey2k/sentry,felixbuenemann/sentry,imankulov/sentry,jean/sentry,gencer/sentry,fuziontech/sentry,jokey2k/sentry,Natim/sentry,JTCunning/sentry,korealerts1/sentry,gg7/sentry,BayanGroup/sentry,ewdurbin/sentry,argonemyth/sentry,1tush/sentry,pauloschilling/sentry,songyi199111/sentry,1tush/sentry,fuziontech/sentry,camilonova/sentry,nicholasserra/sentry,JamesMura/sentry,jean/sentry,Kryz/sentry,wong2/sentry,pauloschilling/sentry,mvaled/sentry,daevaorn/sentry,vperron/sentry,gg7/sentry,fuziontech/sentry,1tush/sentry,zenefits/sentry,looker/sentry,nicholasserra/sentry,wong2/sentry,imankulov/sentry,kevinlondon/sentry,daevaorn/sentry,vperron/sentry,Natim/sentry,Kryz/sentry,JamesMura/sentry,ifduyue/sentry,boneyao/sentry,gencer/sentry,jokey2k/sentry,camilonova/sentry,hongliang5623/sentry,korealerts1/sentry,pauloschilling/sentry,felixbuenemann/sentry,alexm92/sentry,daevaorn/sentry,zenefits/sentry,looker/sentry,llonchj/sentry,argonemyth/sentry,zenefits/sentry,looker/sentry,ewdurbin/sentry,nicholasserra/sentry,looker/sentry,argonemyth/sentry,TedaLIEz/sentry,JamesMura/sentry,mvaled/sentry
|
Add test for member settings display
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import OrganizationMember, OrganizationMemberType
from sentry.testutils import TestCase
class OrganizationMemberSettingsTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization(name='foo', owner=self.user)
team_1 = self.create_team(name='foo', organization=organization)
team_2 = self.create_team(name='bar', organization=organization)
user = self.create_user('bar@example.com')
member = OrganizationMember.objects.create(
organization=organization,
user=user,
type=OrganizationMemberType.MEMBER,
has_global_access=False,
)
member.teams.add(team_2)
path = reverse('sentry-organization-member-settings', args=[organization.id, member.id])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/organization-member-settings.html')
assert resp.context['organization'] == organization
assert resp.context['member'] == member
assert resp.context['form']
|
<commit_before><commit_msg>Add test for member settings display<commit_after>
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import OrganizationMember, OrganizationMemberType
from sentry.testutils import TestCase
class OrganizationMemberSettingsTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization(name='foo', owner=self.user)
team_1 = self.create_team(name='foo', organization=organization)
team_2 = self.create_team(name='bar', organization=organization)
user = self.create_user('bar@example.com')
member = OrganizationMember.objects.create(
organization=organization,
user=user,
type=OrganizationMemberType.MEMBER,
has_global_access=False,
)
member.teams.add(team_2)
path = reverse('sentry-organization-member-settings', args=[organization.id, member.id])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/organization-member-settings.html')
assert resp.context['organization'] == organization
assert resp.context['member'] == member
assert resp.context['form']
|
Add test for member settings displayfrom __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import OrganizationMember, OrganizationMemberType
from sentry.testutils import TestCase
class OrganizationMemberSettingsTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization(name='foo', owner=self.user)
team_1 = self.create_team(name='foo', organization=organization)
team_2 = self.create_team(name='bar', organization=organization)
user = self.create_user('bar@example.com')
member = OrganizationMember.objects.create(
organization=organization,
user=user,
type=OrganizationMemberType.MEMBER,
has_global_access=False,
)
member.teams.add(team_2)
path = reverse('sentry-organization-member-settings', args=[organization.id, member.id])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/organization-member-settings.html')
assert resp.context['organization'] == organization
assert resp.context['member'] == member
assert resp.context['form']
|
<commit_before><commit_msg>Add test for member settings display<commit_after>from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import OrganizationMember, OrganizationMemberType
from sentry.testutils import TestCase
class OrganizationMemberSettingsTest(TestCase):
def test_renders_with_context(self):
organization = self.create_organization(name='foo', owner=self.user)
team_1 = self.create_team(name='foo', organization=organization)
team_2 = self.create_team(name='bar', organization=organization)
user = self.create_user('bar@example.com')
member = OrganizationMember.objects.create(
organization=organization,
user=user,
type=OrganizationMemberType.MEMBER,
has_global_access=False,
)
member.teams.add(team_2)
path = reverse('sentry-organization-member-settings', args=[organization.id, member.id])
self.login_as(self.user)
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/organization-member-settings.html')
assert resp.context['organization'] == organization
assert resp.context['member'] == member
assert resp.context['form']
|
|
398bdd0b004f8f29bfd405a690102d7d3283fb76
|
ipa-fun-bootstrap.py
|
ipa-fun-bootstrap.py
|
#! /bin/bash
##############################################################################
# Author: Tomas Babej <tbabej@redhat.com>
#
# Builds the FreeIPA rpms from sources.
# Removes everything from the $DIST_DIR and moves newly built RPMs there
#
# Usage: $0
# Returns: 0 on success, 1 on failure
##############################################################################
# If any command here fails, exit the script
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DIR/config.sh
# Change sources in site-packages
pushd "/usr/lib/python2.7/site-packages/"
for DIR in `echo ipalib ipaserver ipapython ipatests`
do
echo "Removing $DIR"
sudo rm -rf $DIR || :
echo "Linking $IPA_DIR/$DIR to $DIR"
sudo ln -s $IPA_DIR/$DIR $DIR
done
popd; echo; echo
# Change tools
pushd $IPA_DIR/install/tools
for FILE in ipa-* ipactl
do
FILEPATH=`which $FILE` || FILEPATH="/usr/sbin/$FILE"
echo "Removing $FILEPATH"
sudo rm $FILEPATH || :
echo "Linking $IPA_DIR/install/tools/$FILE to $FILEPATH"
sudo ln -s $IPA_DIR/install/tools/$FILE $FILEPATH
done
popd; echo; echo;
# Install share directory
sudo rm -rf /usr/share/ipa || :
sudo mkdir /usr/share/ipa
pushd $IPA_DIR/install/share
for FILE in *
do
sudo ln -s $IPA_DIR/install/share/$FILE /usr/share/ipa/$FILE
done
for FILE in `echo ffextension html migration ui updates wsgi`
do
sudo ln -s $IPA_DIR/install/$FILE /usr/share/ipa/$FILE
done
|
Add initial version of bootstrap tool
|
Add initial version of bootstrap tool
|
Python
|
mit
|
pspacek/labtool,pvoborni/labtool,pspacek/labtool,tomaskrizek/labtool,pvoborni/labtool,tomaskrizek/labtool
|
Add initial version of bootstrap tool
|
#! /bin/bash
##############################################################################
# Author: Tomas Babej <tbabej@redhat.com>
#
# Builds the FreeIPA rpms from sources.
# Removes everything from the $DIST_DIR and moves newly built RPMs there
#
# Usage: $0
# Returns: 0 on success, 1 on failure
##############################################################################
# If any command here fails, exit the script
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DIR/config.sh
# Change sources in site-packages
pushd "/usr/lib/python2.7/site-packages/"
for DIR in `echo ipalib ipaserver ipapython ipatests`
do
echo "Removing $DIR"
sudo rm -rf $DIR || :
echo "Linking $IPA_DIR/$DIR to $DIR"
sudo ln -s $IPA_DIR/$DIR $DIR
done
popd; echo; echo
# Change tools
pushd $IPA_DIR/install/tools
for FILE in ipa-* ipactl
do
FILEPATH=`which $FILE` || FILEPATH="/usr/sbin/$FILE"
echo "Removing $FILEPATH"
sudo rm $FILEPATH || :
echo "Linking $IPA_DIR/install/tools/$FILE to $FILEPATH"
sudo ln -s $IPA_DIR/install/tools/$FILE $FILEPATH
done
popd; echo; echo;
# Install share directory
sudo rm -rf /usr/share/ipa || :
sudo mkdir /usr/share/ipa
pushd $IPA_DIR/install/share
for FILE in *
do
sudo ln -s $IPA_DIR/install/share/$FILE /usr/share/ipa/$FILE
done
for FILE in `echo ffextension html migration ui updates wsgi`
do
sudo ln -s $IPA_DIR/install/$FILE /usr/share/ipa/$FILE
done
|
<commit_before><commit_msg>Add initial version of bootstrap tool<commit_after>
|
#! /bin/bash
##############################################################################
# Author: Tomas Babej <tbabej@redhat.com>
#
# Builds the FreeIPA rpms from sources.
# Removes everything from the $DIST_DIR and moves newly built RPMs there
#
# Usage: $0
# Returns: 0 on success, 1 on failure
##############################################################################
# If any command here fails, exit the script
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DIR/config.sh
# Change sources in site-packages
pushd "/usr/lib/python2.7/site-packages/"
for DIR in `echo ipalib ipaserver ipapython ipatests`
do
echo "Removing $DIR"
sudo rm -rf $DIR || :
echo "Linking $IPA_DIR/$DIR to $DIR"
sudo ln -s $IPA_DIR/$DIR $DIR
done
popd; echo; echo
# Change tools
pushd $IPA_DIR/install/tools
for FILE in ipa-* ipactl
do
FILEPATH=`which $FILE` || FILEPATH="/usr/sbin/$FILE"
echo "Removing $FILEPATH"
sudo rm $FILEPATH || :
echo "Linking $IPA_DIR/install/tools/$FILE to $FILEPATH"
sudo ln -s $IPA_DIR/install/tools/$FILE $FILEPATH
done
popd; echo; echo;
# Install share directory
sudo rm -rf /usr/share/ipa || :
sudo mkdir /usr/share/ipa
pushd $IPA_DIR/install/share
for FILE in *
do
sudo ln -s $IPA_DIR/install/share/$FILE /usr/share/ipa/$FILE
done
for FILE in `echo ffextension html migration ui updates wsgi`
do
sudo ln -s $IPA_DIR/install/$FILE /usr/share/ipa/$FILE
done
|
Add initial version of bootstrap tool#! /bin/bash
##############################################################################
# Author: Tomas Babej <tbabej@redhat.com>
#
# Builds the FreeIPA rpms from sources.
# Removes everything from the $DIST_DIR and moves newly built RPMs there
#
# Usage: $0
# Returns: 0 on success, 1 on failure
##############################################################################
# If any command here fails, exit the script
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DIR/config.sh
# Change sources in site-packages
pushd "/usr/lib/python2.7/site-packages/"
for DIR in `echo ipalib ipaserver ipapython ipatests`
do
echo "Removing $DIR"
sudo rm -rf $DIR || :
echo "Linking $IPA_DIR/$DIR to $DIR"
sudo ln -s $IPA_DIR/$DIR $DIR
done
popd; echo; echo
# Change tools
pushd $IPA_DIR/install/tools
for FILE in ipa-* ipactl
do
FILEPATH=`which $FILE` || FILEPATH="/usr/sbin/$FILE"
echo "Removing $FILEPATH"
sudo rm $FILEPATH || :
echo "Linking $IPA_DIR/install/tools/$FILE to $FILEPATH"
sudo ln -s $IPA_DIR/install/tools/$FILE $FILEPATH
done
popd; echo; echo;
# Install share directory
sudo rm -rf /usr/share/ipa || :
sudo mkdir /usr/share/ipa
pushd $IPA_DIR/install/share
for FILE in *
do
sudo ln -s $IPA_DIR/install/share/$FILE /usr/share/ipa/$FILE
done
for FILE in `echo ffextension html migration ui updates wsgi`
do
sudo ln -s $IPA_DIR/install/$FILE /usr/share/ipa/$FILE
done
|
<commit_before><commit_msg>Add initial version of bootstrap tool<commit_after>#! /bin/bash
##############################################################################
# Author: Tomas Babej <tbabej@redhat.com>
#
# Builds the FreeIPA rpms from sources.
# Removes everything from the $DIST_DIR and moves newly built RPMs there
#
# Usage: $0
# Returns: 0 on success, 1 on failure
##############################################################################
# If any command here fails, exit the script
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DIR/config.sh
# Change sources in site-packages
pushd "/usr/lib/python2.7/site-packages/"
for DIR in `echo ipalib ipaserver ipapython ipatests`
do
echo "Removing $DIR"
sudo rm -rf $DIR || :
echo "Linking $IPA_DIR/$DIR to $DIR"
sudo ln -s $IPA_DIR/$DIR $DIR
done
popd; echo; echo
# Change tools
pushd $IPA_DIR/install/tools
for FILE in ipa-* ipactl
do
FILEPATH=`which $FILE` || FILEPATH="/usr/sbin/$FILE"
echo "Removing $FILEPATH"
sudo rm $FILEPATH || :
echo "Linking $IPA_DIR/install/tools/$FILE to $FILEPATH"
sudo ln -s $IPA_DIR/install/tools/$FILE $FILEPATH
done
popd; echo; echo;
# Install share directory
sudo rm -rf /usr/share/ipa || :
sudo mkdir /usr/share/ipa
pushd $IPA_DIR/install/share
for FILE in *
do
sudo ln -s $IPA_DIR/install/share/$FILE /usr/share/ipa/$FILE
done
for FILE in `echo ffextension html migration ui updates wsgi`
do
sudo ln -s $IPA_DIR/install/$FILE /usr/share/ipa/$FILE
done
|
|
d3b24e3808f3149a80148c26aec6a4077581e4e8
|
avg_daily_vol.py
|
avg_daily_vol.py
|
#!/usr/bin/env python
"""
Compute average daily trade volume of Indian securities.
Notes
-----
Assumes that the trade data for a specific firm XXX is stored in a file
named XXX-trades.csv.
"""
import glob
import pandas
import re
file_name_list = glob.glob('*-trades.csv')
avg_daily_vol_dict = {}
for file_name in file_name_list:
# Get security name from file name:
security = re.search('(\w+)-trades\.csv', file_name).group(1)
# Column 3 contains the day, column 11 contains the volume:
df = pandas.read_csv(file_name, header=None)
avg_daily_vol = df.groupby(3)[11].sum().mean()
avg_daily_vol_dict[security] = avg_daily_vol
# Print securities with highest volume first:
for security in sorted(avg_daily_vol_dict, key=avg_daily_vol_dict.get, reverse=True):
print '{:<15s} {:>12.2f}'.format(security, avg_daily_vol_dict[security])
|
Add script for computing average daily volume.
|
Add script for computing average daily volume.
--HG--
branch : cython
|
Python
|
bsd-3-clause
|
lebedov/nseindia_lob
|
Add script for computing average daily volume.
--HG--
branch : cython
|
#!/usr/bin/env python
"""
Compute average daily trade volume of Indian securities.
Notes
-----
Assumes that the trade data for a specific firm XXX is stored in a file
named XXX-trades.csv.
"""
import glob
import pandas
import re
file_name_list = glob.glob('*-trades.csv')
avg_daily_vol_dict = {}
for file_name in file_name_list:
# Get security name from file name:
security = re.search('(\w+)-trades\.csv', file_name).group(1)
# Column 3 contains the day, column 11 contains the volume:
df = pandas.read_csv(file_name, header=None)
avg_daily_vol = df.groupby(3)[11].sum().mean()
avg_daily_vol_dict[security] = avg_daily_vol
# Print securities with highest volume first:
for security in sorted(avg_daily_vol_dict, key=avg_daily_vol_dict.get, reverse=True):
print '{:<15s} {:>12.2f}'.format(security, avg_daily_vol_dict[security])
|
<commit_before><commit_msg>Add script for computing average daily volume.
--HG--
branch : cython<commit_after>
|
#!/usr/bin/env python
"""
Compute average daily trade volume of Indian securities.
Notes
-----
Assumes that the trade data for a specific firm XXX is stored in a file
named XXX-trades.csv.
"""
import glob
import pandas
import re
file_name_list = glob.glob('*-trades.csv')
avg_daily_vol_dict = {}
for file_name in file_name_list:
# Get security name from file name:
security = re.search('(\w+)-trades\.csv', file_name).group(1)
# Column 3 contains the day, column 11 contains the volume:
df = pandas.read_csv(file_name, header=None)
avg_daily_vol = df.groupby(3)[11].sum().mean()
avg_daily_vol_dict[security] = avg_daily_vol
# Print securities with highest volume first:
for security in sorted(avg_daily_vol_dict, key=avg_daily_vol_dict.get, reverse=True):
print '{:<15s} {:>12.2f}'.format(security, avg_daily_vol_dict[security])
|
Add script for computing average daily volume.
--HG--
branch : cython#!/usr/bin/env python
"""
Compute average daily trade volume of Indian securities.
Notes
-----
Assumes that the trade data for a specific firm XXX is stored in a file
named XXX-trades.csv.
"""
import glob
import pandas
import re
file_name_list = glob.glob('*-trades.csv')
avg_daily_vol_dict = {}
for file_name in file_name_list:
# Get security name from file name:
security = re.search('(\w+)-trades\.csv', file_name).group(1)
# Column 3 contains the day, column 11 contains the volume:
df = pandas.read_csv(file_name, header=None)
avg_daily_vol = df.groupby(3)[11].sum().mean()
avg_daily_vol_dict[security] = avg_daily_vol
# Print securities with highest volume first:
for security in sorted(avg_daily_vol_dict, key=avg_daily_vol_dict.get, reverse=True):
print '{:<15s} {:>12.2f}'.format(security, avg_daily_vol_dict[security])
|
<commit_before><commit_msg>Add script for computing average daily volume.
--HG--
branch : cython<commit_after>#!/usr/bin/env python
"""
Compute average daily trade volume of Indian securities.
Notes
-----
Assumes that the trade data for a specific firm XXX is stored in a file
named XXX-trades.csv.
"""
import glob
import pandas
import re
file_name_list = glob.glob('*-trades.csv')
avg_daily_vol_dict = {}
for file_name in file_name_list:
# Get security name from file name:
security = re.search('(\w+)-trades\.csv', file_name).group(1)
# Column 3 contains the day, column 11 contains the volume:
df = pandas.read_csv(file_name, header=None)
avg_daily_vol = df.groupby(3)[11].sum().mean()
avg_daily_vol_dict[security] = avg_daily_vol
# Print securities with highest volume first:
for security in sorted(avg_daily_vol_dict, key=avg_daily_vol_dict.get, reverse=True):
print '{:<15s} {:>12.2f}'.format(security, avg_daily_vol_dict[security])
|
|
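The core of the script above is a group-by-day sum followed by a mean across days, which is easy to check on synthetic data. A sketch assuming Python 3 and a current pandas, with made-up rows that follow the script's positional-column convention (day in column 3, volume in column 11):
import pandas as pd

rows = []
for day, volumes in [("2012-01-02", [100, 250]), ("2012-01-03", [400])]:
    for vol in volumes:
        row = [None] * 12
        row[3] = day        # column 3: trading day
        row[11] = vol       # column 11: trade volume
        rows.append(row)

df = pd.DataFrame(rows)

# Daily totals are 350 and 400, so the average daily volume is 375.0.
avg_daily_vol = df.groupby(3)[11].sum().mean()
print(avg_daily_vol)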
8a7a27dd356ecb1f067be0270e82b37195605499
|
scripts/simulate_whipple_benchmark.py
|
scripts/simulate_whipple_benchmark.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import scipy
from dtk.bicycle import benchmark_state_space_vs_speed, benchmark_matrices
def simulate(v, x0, dt, n, u=None):
_, A, B = benchmark_state_space_vs_speed(*benchmark_matrices(), [v])
A = np.squeeze(A)
B = np.squeeze(B)
M = np.zeros((6, 6))
M[:4, :4] = A
M[:4, 4:] = B
M *= dt
Md = scipy.linalg.expm(M)
Md_zero = Md[4:, :4]
Md_eye = Md[4:, 4:]
if not np.array_equal(Md_zero, np.zeros(Md_zero.shape)):
print('WARNING: Failure in system discretization')
print(Md_zero)
print('should equal 0')
if not np.array_equal(Md_eye, np.eye(2)):
print('WARNING: Failure in system discretization')
print(Md_eye)
print('should equal I')
Ad = Md[:4, :4]
Bd = Md[:4, 4:]
if u is None:
u = np.zeros((2, n))
x = np.zeros((4, n))
for i in range(n):
x[:, i:i+1] = np.dot(Ad, x0) + np.dot(Bd, u[:, i:i+1])
x0 = x[:, i:i+1]
return x
if __name__ == '__main__':
from plot_sim import plot_states
import matplotlib.pyplot as plt
v = 7
dt = 0.005
n = int(4.5/dt) # simulate for 4.5 seconds
x0 = np.array(
[ 0.0031247 , 0.09299604, -0.03369007, -0.05003717]
).reshape((4, 1))
x = simulate(v, x0, dt, n)
t = np.array(range(n + 1)) * dt
fig, ax = plot_states(t, np.hstack((x0, x)).T)
plt.show()
|
Add Python script to simulate Whipple model
|
Add Python script to simulate Whipple model
|
Python
|
bsd-2-clause
|
oliverlee/phobos,oliverlee/phobos,oliverlee/phobos,oliverlee/phobos
|
Add Python script to simulate Whipple model
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import scipy
from dtk.bicycle import benchmark_state_space_vs_speed, benchmark_matrices
def simulate(v, x0, dt, n, u=None):
_, A, B = benchmark_state_space_vs_speed(*benchmark_matrices(), [v])
A = np.squeeze(A)
B = np.squeeze(B)
M = np.zeros((6, 6))
M[:4, :4] = A
M[:4, 4:] = B
M *= dt
Md = scipy.linalg.expm(M)
Md_zero = Md[4:, :4]
Md_eye = Md[4:, 4:]
if not np.array_equal(Md_zero, np.zeros(Md_zero.shape)):
print('WARNING: Failure in system discretization')
print(Md_zero)
print('should equal 0')
if not np.array_equal(Md_eye, np.eye(2)):
print('WARNING: Failure in system discretization')
print(Md_eye)
print('should equal I')
Ad = Md[:4, :4]
Bd = Md[:4, 4:]
if u is None:
u = np.zeros((2, n))
x = np.zeros((4, n))
for i in range(n):
x[:, i:i+1] = np.dot(Ad, x0) + np.dot(Bd, u[:, i:i+1])
x0 = x[:, i:i+1]
return x
if __name__ == '__main__':
from plot_sim import plot_states
import matplotlib.pyplot as plt
v = 7
dt = 0.005
n = int(4.5/dt) # simulate for 4.5 seconds
x0 = np.array(
[ 0.0031247 , 0.09299604, -0.03369007, -0.05003717]
).reshape((4, 1))
x = simulate(v, x0, dt, n)
t = np.array(range(n + 1)) * dt
fig, ax = plot_states(t, np.hstack((x0, x)).T)
plt.show()
|
<commit_before><commit_msg>Add Python script to simulate Whipple model<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import scipy
from dtk.bicycle import benchmark_state_space_vs_speed, benchmark_matrices
def simulate(v, x0, dt, n, u=None):
_, A, B = benchmark_state_space_vs_speed(*benchmark_matrices(), [v])
A = np.squeeze(A)
B = np.squeeze(B)
M = np.zeros((6, 6))
M[:4, :4] = A
M[:4, 4:] = B
M *= dt
Md = scipy.linalg.expm(M)
Md_zero = Md[4:, :4]
Md_eye = Md[4:, 4:]
if not np.array_equal(Md_zero, np.zeros(Md_zero.shape)):
print('WARNING: Failure in system discretization')
print(Md_zero)
print('should equal 0')
if not np.array_equal(Md_eye, np.eye(2)):
print('WARNING: Failure in system discretization')
print(Md_eye)
print('should equal I')
Ad = Md[:4, :4]
Bd = Md[:4, 4:]
if u is None:
u = np.zeros((2, n))
x = np.zeros((4, n))
for i in range(n):
x[:, i:i+1] = np.dot(Ad, x0) + np.dot(Bd, u[:, i:i+1])
x0 = x[:, i:i+1]
return x
if __name__ == '__main__':
from plot_sim import plot_states
import matplotlib.pyplot as plt
v = 7
dt = 0.005
n = int(4.5/dt) # simulate for 4.5 seconds
x0 = np.array(
[ 0.0031247 , 0.09299604, -0.03369007, -0.05003717]
).reshape((4, 1))
x = simulate(v, x0, dt, n)
t = np.array(range(n + 1)) * dt
fig, ax = plot_states(t, np.hstack((x0, x)).T)
plt.show()
|
Add Python script to simulate Whipple model#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import scipy
from dtk.bicycle import benchmark_state_space_vs_speed, benchmark_matrices
def simulate(v, x0, dt, n, u=None):
_, A, B = benchmark_state_space_vs_speed(*benchmark_matrices(), [v])
A = np.squeeze(A)
B = np.squeeze(B)
M = np.zeros((6, 6))
M[:4, :4] = A
M[:4, 4:] = B
M *= dt
Md = scipy.linalg.expm(M)
Md_zero = Md[4:, :4]
Md_eye = Md[4:, 4:]
if not np.array_equal(Md_zero, np.zeros(Md_zero.shape)):
print('WARNING: Failure in system discretization')
print(Md_zero)
print('should equal 0')
if not np.array_equal(Md_eye, np.eye(2)):
print('WARNING: Failure in system discretization')
print(Md_eye)
print('should equal I')
Ad = Md[:4, :4]
Bd = Md[:4, 4:]
if u is None:
u = np.zeros((2, n))
x = np.zeros((4, n))
for i in range(n):
x[:, i:i+1] = np.dot(Ad, x0) + np.dot(Bd, u[:, i:i+1])
x0 = x[:, i:i+1]
return x
if __name__ == '__main__':
from plot_sim import plot_states
import matplotlib.pyplot as plt
v = 7
dt = 0.005
n = int(4.5/dt) # simulate for 4.5 seconds
x0 = np.array(
[ 0.0031247 , 0.09299604, -0.03369007, -0.05003717]
).reshape((4, 1))
x = simulate(v, x0, dt, n)
t = np.array(range(n + 1)) * dt
fig, ax = plot_states(t, np.hstack((x0, x)).T)
plt.show()
|
<commit_before><commit_msg>Add Python script to simulate Whipple model<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import scipy
from dtk.bicycle import benchmark_state_space_vs_speed, benchmark_matrices
def simulate(v, x0, dt, n, u=None):
_, A, B = benchmark_state_space_vs_speed(*benchmark_matrices(), [v])
A = np.squeeze(A)
B = np.squeeze(B)
M = np.zeros((6, 6))
M[:4, :4] = A
M[:4, 4:] = B
M *= dt
Md = scipy.linalg.expm(M)
Md_zero = Md[4:, :4]
Md_eye = Md[4:, 4:]
if not np.array_equal(Md_zero, np.zeros(Md_zero.shape)):
print('WARNING: Failure in system discretization')
print(Md_zero)
print('should equal 0')
if not np.array_equal(Md_eye, np.eye(2)):
print('WARNING: Failure in system discretization')
print(Md_eye)
print('should equal I')
Ad = Md[:4, :4]
Bd = Md[:4, 4:]
if u is None:
u = np.zeros((2, n))
x = np.zeros((4, n))
for i in range(n):
x[:, i:i+1] = np.dot(Ad, x0) + np.dot(Bd, u[:, i:i+1])
x0 = x[:, i:i+1]
return x
if __name__ == '__main__':
from plot_sim import plot_states
import matplotlib.pyplot as plt
v = 7
dt = 0.005
n = int(4.5/dt) # simulate for 4.5 seconds
x0 = np.array(
[ 0.0031247 , 0.09299604, -0.03369007, -0.05003717]
).reshape((4, 1))
x = simulate(v, x0, dt, n)
t = np.array(range(n + 1)) * dt
fig, ax = plot_states(t, np.hstack((x0, x)).T)
plt.show()
|
|
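The discretization step in the record above embeds A and B in one augmented matrix and takes its exponential, the standard zero-order-hold construction, and it is independent of the bicycle model. A self-contained check on a double integrator, assuming only numpy and scipy, where the exact discrete matrices are known in closed form:
import numpy as np
from scipy.linalg import expm

# Continuous-time double integrator: x = [position, velocity], u = acceleration.
A = np.array([[0.0, 1.0],
              [0.0, 0.0]])
B = np.array([[0.0],
              [1.0]])
dt = 0.1

# Zero-order-hold discretization via the augmented matrix exponential.
n, m = A.shape[0], B.shape[1]
M = np.zeros((n + m, n + m))
M[:n, :n] = A
M[:n, n:] = B
Md = expm(M * dt)
Ad, Bd = Md[:n, :n], Md[:n, n:]

# Exact answers for the double integrator:
assert np.allclose(Ad, [[1.0, dt], [0.0, 1.0]])
assert np.allclose(Bd, [[0.5 * dt**2], [dt]])
print("ZOH discretization matches the closed-form result")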
57cd7d021485394fe8c17389bf9bdbf052af9f54
|
bin/subdivide-new-cache.py
|
bin/subdivide-new-cache.py
|
#!/usr/bin/env python
import re
from boundaries import *
script_directory = os.path.dirname(os.path.abspath(__file__))
cache_directory = os.path.realpath(os.path.join(script_directory,
'..',
'data',
'new-cache'))
for old_filename in os.listdir(cache_directory):
print "filename is", old_filename
m = re.search(r'^(way|node|relation)-(\d+)\.xml$', old_filename)
if not m:
print >> sys.stderr, "Ignoring file:", old_filename
continue
element_type, element_id = m.groups()
full_new_filename = get_cache_filename(element_type, element_id)
full_old_filename = os.path.join(cache_directory,
old_filename)
os.rename(full_old_filename, full_new_filename)
|
Add a script to migrate the element cache to the new structure
|
Add a script to migrate the element cache to the new structure
|
Python
|
agpl-3.0
|
opencorato/mapit,opencorato/mapit,chris48s/mapit,Code4SA/mapit,New-Bamboo/mapit,Code4SA/mapit,chris48s/mapit,New-Bamboo/mapit,Sinar/mapit,Code4SA/mapit,chris48s/mapit,Sinar/mapit,opencorato/mapit
|
Add a script to migrate the element cache to the new structure
|
#!/usr/bin/env python
import re
from boundaries import *
script_directory = os.path.dirname(os.path.abspath(__file__))
cache_directory = os.path.realpath(os.path.join(script_directory,
'..',
'data',
'new-cache'))
for old_filename in os.listdir(cache_directory):
print "filename is", old_filename
m = re.search(r'^(way|node|relation)-(\d+)\.xml$', old_filename)
if not m:
print >> sys.stderr, "Ignoring file:", old_filename
continue
element_type, element_id = m.groups()
full_new_filename = get_cache_filename(element_type, element_id)
full_old_filename = os.path.join(cache_directory,
old_filename)
os.rename(full_old_filename, full_new_filename)
|
<commit_before><commit_msg>Add a script to migrate the element cache to the new structure<commit_after>
|
#!/usr/bin/env python
import re
from boundaries import *
script_directory = os.path.dirname(os.path.abspath(__file__))
cache_directory = os.path.realpath(os.path.join(script_directory,
'..',
'data',
'new-cache'))
for old_filename in os.listdir(cache_directory):
print "filename is", old_filename
m = re.search(r'^(way|node|relation)-(\d+)\.xml$', old_filename)
if not m:
print >> sys.stderr, "Ignoring file:", old_filename
continue
element_type, element_id = m.groups()
full_new_filename = get_cache_filename(element_type, element_id)
full_old_filename = os.path.join(cache_directory,
old_filename)
os.rename(full_old_filename, full_new_filename)
|
Add a script to migrate the element cache to the new structure#!/usr/bin/env python
import re
from boundaries import *
script_directory = os.path.dirname(os.path.abspath(__file__))
cache_directory = os.path.realpath(os.path.join(script_directory,
'..',
'data',
'new-cache'))
for old_filename in os.listdir(cache_directory):
print "filename is", old_filename
m = re.search(r'^(way|node|relation)-(\d+)\.xml$', old_filename)
if not m:
print >> sys.stderr, "Ignoring file:", old_filename
continue
element_type, element_id = m.groups()
full_new_filename = get_cache_filename(element_type, element_id)
full_old_filename = os.path.join(cache_directory,
old_filename)
os.rename(full_old_filename, full_new_filename)
|
<commit_before><commit_msg>Add a script to migrate the element cache to the new structure<commit_after>#!/usr/bin/env python
import re
from boundaries import *
script_directory = os.path.dirname(os.path.abspath(__file__))
cache_directory = os.path.realpath(os.path.join(script_directory,
'..',
'data',
'new-cache'))
for old_filename in os.listdir(cache_directory):
print "filename is", old_filename
m = re.search(r'^(way|node|relation)-(\d+)\.xml$', old_filename)
if not m:
print >> sys.stderr, "Ignoring file:", old_filename
continue
element_type, element_id = m.groups()
full_new_filename = get_cache_filename(element_type, element_id)
full_old_filename = os.path.join(cache_directory,
old_filename)
os.rename(full_old_filename, full_new_filename)
|
|
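The migration above hinges on one regular expression that recognises cached element files; the parsing can be exercised without touching the filesystem. A small sketch (Python 3, file names invented):
import re

pattern = re.compile(r'^(way|node|relation)-(\d+)\.xml$')

for name in ['node-123456.xml', 'relation-42.xml', 'README.txt']:
    m = pattern.search(name)
    if not m:
        print('Ignoring file: {}'.format(name))
        continue
    element_type, element_id = m.groups()
    print(element_type, element_id)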
908c52c3d0f6f0a25cfd4868673a8058ad862400
|
bin/update/deploy-product-details.py
|
bin/update/deploy-product-details.py
|
"""
Deployment for Bedrock in production.
Requires commander (https://github.com/oremj/commander) which is installed on
the systems that need it.
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from commander.deploy import task, hostgroups
import commander_settings as settings
# Functions below called by chief in this order
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
print "done"
@task
def update(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local('bin/update-scripts/prod/update-prod-product-details.sh')
@hostgroups(settings.WEB_HOSTGROUP, remote_kwargs={'ssh_key': settings.SSH_KEY})
def deploy(ctx):
ctx.remote("service httpd graceful")
|
Add product-details chief deployment script.
|
Add product-details chief deployment script.
After this merges we'll only need to symlink the
old p-d deployment script to this one, like the
rest of the chief scripts are.
|
Python
|
mpl-2.0
|
dudepare/bedrock,ericawright/bedrock,rishiloyola/bedrock,chirilo/bedrock,marcoscaceres/bedrock,petabyte/bedrock,dudepare/bedrock,bensternthal/bedrock,jacshfr/mozilla-bedrock,sylvestre/bedrock,Sancus/bedrock,pmclanahan/bedrock,mkmelin/bedrock,davehunt/bedrock,mermi/bedrock,Sancus/bedrock,malena/bedrock,mermi/bedrock,ericawright/bedrock,Jobava/bedrock,craigcook/bedrock,flodolo/bedrock,marcoscaceres/bedrock,rishiloyola/bedrock,pascalchevrel/bedrock,CSCI-462-01-2017/bedrock,Sancus/bedrock,malena/bedrock,sgarrity/bedrock,MichaelKohler/bedrock,flodolo/bedrock,marcoscaceres/bedrock,jgmize/bedrock,alexgibson/bedrock,jacshfr/mozilla-bedrock,gauthierm/bedrock,SujaySKumar/bedrock,jacshfr/mozilla-bedrock,yglazko/bedrock,jacshfr/mozilla-bedrock,craigcook/bedrock,alexgibson/bedrock,gerv/bedrock,ericawright/bedrock,gerv/bedrock,jgmize/bedrock,kyoshino/bedrock,jpetto/bedrock,jacshfr/mozilla-bedrock,mkmelin/bedrock,jpetto/bedrock,mermi/bedrock,MichaelKohler/bedrock,SujaySKumar/bedrock,mozilla/bedrock,kyoshino/bedrock,l-hedgehog/bedrock,gerv/bedrock,TheoChevalier/bedrock,bensternthal/bedrock,chirilo/bedrock,TheJJ100100/bedrock,hoosteeno/bedrock,davehunt/bedrock,gauthierm/bedrock,CSCI-462-01-2017/bedrock,chirilo/bedrock,TheJJ100100/bedrock,CSCI-462-01-2017/bedrock,pascalchevrel/bedrock,sgarrity/bedrock,andreadelrio/bedrock,mahinthjoe/bedrock,TheoChevalier/bedrock,analytics-pros/mozilla-bedrock,davehunt/bedrock,yglazko/bedrock,andreadelrio/bedrock,schalkneethling/bedrock,flodolo/bedrock,pmclanahan/bedrock,SujaySKumar/bedrock,SujaySKumar/bedrock,petabyte/bedrock,schalkneethling/bedrock,sgarrity/bedrock,CSCI-462-01-2017/bedrock,andreadelrio/bedrock,TheJJ100100/bedrock,dudepare/bedrock,mahinthjoe/bedrock,jgmize/bedrock,l-hedgehog/bedrock,dudepare/bedrock,mozilla/bedrock,gauthierm/bedrock,Jobava/bedrock,sylvestre/bedrock,jpetto/bedrock,kyoshino/bedrock,gerv/bedrock,bensternthal/bedrock,mahinthjoe/bedrock,sgarrity/bedrock,glogiotatidis/bedrock,pascalchevrel/bedrock,alexgibson/bedrock,gauthierm/bedrock,flodolo/bedrock,marcoscaceres/bedrock,schalkneethling/bedrock,bensternthal/bedrock,hoosteeno/bedrock,sylvestre/bedrock,chirilo/bedrock,rishiloyola/bedrock,jpetto/bedrock,malena/bedrock,analytics-pros/mozilla-bedrock,yglazko/bedrock,hoosteeno/bedrock,malena/bedrock,yglazko/bedrock,pmclanahan/bedrock,mozilla/bedrock,schalkneethling/bedrock,TheJJ100100/bedrock,petabyte/bedrock,l-hedgehog/bedrock,mozilla/bedrock,l-hedgehog/bedrock,pmclanahan/bedrock,mkmelin/bedrock,davehunt/bedrock,MichaelKohler/bedrock,analytics-pros/mozilla-bedrock,kyoshino/bedrock,analytics-pros/mozilla-bedrock,mkmelin/bedrock,pascalchevrel/bedrock,craigcook/bedrock,mermi/bedrock,Sancus/bedrock,craigcook/bedrock,jgmize/bedrock,hoosteeno/bedrock,glogiotatidis/bedrock,mahinthjoe/bedrock,alexgibson/bedrock,glogiotatidis/bedrock,TheoChevalier/bedrock,MichaelKohler/bedrock,petabyte/bedrock,Jobava/bedrock,ericawright/bedrock,rishiloyola/bedrock,TheoChevalier/bedrock,andreadelrio/bedrock,sylvestre/bedrock,Jobava/bedrock,glogiotatidis/bedrock
|
Add product-details chief deployment script.
After this merges we'll only need to symlink the
old p-d deployment script to this one, like the
rest of the chief scripts are.
|
"""
Deployment for Bedrock in production.
Requires commander (https://github.com/oremj/commander) which is installed on
the systems that need it.
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from commander.deploy import task, hostgroups
import commander_settings as settings
# Functions below called by chief in this order
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
print "done"
@task
def update(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local('bin/update-scripts/prod/update-prod-product-details.sh')
@hostgroups(settings.WEB_HOSTGROUP, remote_kwargs={'ssh_key': settings.SSH_KEY})
def deploy(ctx):
ctx.remote("service httpd graceful")
|
<commit_before><commit_msg>Add product-details chief deployment script.
After this merges we'll only need to symlink the
old p-d deployment script to this one, like the
rest of the chief scripts are.<commit_after>
|
"""
Deployment for Bedrock in production.
Requires commander (https://github.com/oremj/commander) which is installed on
the systems that need it.
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from commander.deploy import task, hostgroups
import commander_settings as settings
# Functions below called by chief in this order
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
print "done"
@task
def update(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local('bin/update-scripts/prod/update-prod-product-details.sh')
@hostgroups(settings.WEB_HOSTGROUP, remote_kwargs={'ssh_key': settings.SSH_KEY})
def deploy(ctx):
ctx.remote("service httpd graceful")
|
Add product-details chief deployment script.
After this merges we'll only need to symlink the
old p-d deployment script to this one, like the
rest of the chief scripts are."""
Deployment for Bedrock in production.
Requires commander (https://github.com/oremj/commander) which is installed on
the systems that need it.
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from commander.deploy import task, hostgroups
import commander_settings as settings
# Functions below called by chief in this order
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
print "done"
@task
def update(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local('bin/update-scripts/prod/update-prod-product-details.sh')
@hostgroups(settings.WEB_HOSTGROUP, remote_kwargs={'ssh_key': settings.SSH_KEY})
def deploy(ctx):
ctx.remote("service httpd graceful")
|
<commit_before><commit_msg>Add product-details chief deployment script.
After this merges we'll only need to symlink the
old p-d deployment script to this one, like the
rest of the chief scripts are.<commit_after>"""
Deployment for Bedrock in production.
Requires commander (https://github.com/oremj/commander) which is installed on
the systems that need it.
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from commander.deploy import task, hostgroups
import commander_settings as settings
# Functions below called by chief in this order
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
print "done"
@task
def update(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local('bin/update-scripts/prod/update-prod-product-details.sh')
@hostgroups(settings.WEB_HOSTGROUP, remote_kwargs={'ssh_key': settings.SSH_KEY})
def deploy(ctx):
ctx.remote("service httpd graceful")
|
|
feb4f46959ff5e2226d968e8a9de849f3883d967
|
scripts/make_bridge_vm.py
|
scripts/make_bridge_vm.py
|
# Setup tapuser for SSH from the CI nodes
useradd tapuser
mkdir -p /home/tapuser/.ssh
chown -R tapuser:tapuser /home/tapuser
chmod 700 /home/tapuser/.ssh
cat project-config-third-party/nodepool/scripts/vm-bridge-key.pub > /home/tapuser/.ssh/authorized_keys
chmod 600 /home/tapuser/.ssh/authorized_keys
# Install bridge utils
apt-get install bridge-utils
# Setup two bridges for L2 tunnels
brctl addbr nexusint1
brctl addbr nexusint2
# Setup GRE taps for the L2 tunnels to the node-provider
ip link add nexustap1 type gretap local 10.0.196.33 remote 10.0.196.3 key 1
ip link add nexustap2 type gretap local 10.0.196.33 remote 10.0.196.3 key 2
# Add GRE taps to the bridges
brctl addif nexusint1 nexustap1
brctl addif nexusint2 nexustap2
# Setup bridges and taps state up
ip link set nexustap1 up
ip link set nexustap2 up
ip link set nexusint1 up
ip link set nexusint2 up
# Setup sudo for the tapuser to access brctl and ip commands
cat > /etc/sudoers.d/tapuser << EOF
tapuser ALL=(ALL) NOPASSWD: /sbin/brctl
tapuser ALL=(ALL) NOPASSWD: /sbin/ip
EOF
|
Add script for setting up the bridge VM
|
Add script for setting up the bridge VM
|
Python
|
apache-2.0
|
CiscoSystems/project-config-third-party,CiscoSystems/project-config-third-party
|
Add script for setting up the bridge VM
|
# Setup tapuser for SSH from the CI nodes
useradd tapuser
mkdir -p /home/tapuser/.ssh
chown -R tapuser:tapuser /home/tapuser
chmod 700 /home/tapuser/.ssh
cat project-config-third-party/nodepool/scripts/vm-bridge-key.pub > /home/tapuser/.ssh/authorized_keys
chmod 600 /home/tapuser/.ssh/authorized_keys
# Install bridge utils
apt-get install bridge-utils
# Setup two bridges for L2 tunnels
brctl addbr nexusint1
brctl addbr nexusint2
# Setup GRE taps for the L2 tunnels to the node-provider
ip link add nexustap1 type gretap local 10.0.196.33 remote 10.0.196.3 key 1
ip link add nexustap2 type gretap local 10.0.196.33 remote 10.0.196.3 key 2
# Add GRE taps to the bridges
brctl addif nexusint1 nexustap1
brctl addif nexusint2 nexustap2
# Setup bridges and taps state up
ip link set nexustap1 up
ip link set nexustap2 up
ip link set nexusint1 up
ip link set nexusint2 up
# Setup sudo for the tapuser to access brctl and ip commands
cat > /etc/sudoers.d/tapuser << EOF
tapuser ALL=(ALL) NOPASSWD: /sbin/brctl
tapuser ALL=(ALL) NOPASSWD: /sbin/ip
EOF
|
<commit_before><commit_msg>Add script for setting up the bridge VM<commit_after>
|
# Setup tapuser for SSH from the CI nodes
useradd tapuser
mkdir -p /home/tapuser/.ssh
chown -R tapuser:tapuser /home/tapuser
chmod 700 /home/tapuser/.ssh
cat project-config-third-party/nodepool/scripts/vm-bridge-key.pub > /home/tapuser/.ssh/authorized_keys
chmod 600 /home/tapuser/.ssh/authorized_keys
# Install bridge utils
apt-get install bridge-utils
# Setup two bridges for L2 tunnels
brctl addbr nexusint1
brctl addbr nexusint2
# Setup GRE taps for the L2 tunnels to the node-provider
ip link add nexustap1 type gretap local 10.0.196.33 remote 10.0.196.3 key 1
ip link add nexustap2 type gretap local 10.0.196.33 remote 10.0.196.3 key 2
# Add GRE taps to the bridges
brctl addif nexusint1 nexustap1
brctl addif nexusint2 nexustap2
# Setup bridges and taps state up
ip link set nexustap1 up
ip link set nexustap2 up
ip link set nexusint1 up
ip link set nexusint2 up
# Setup sudo for the tapuser to access brctl and ip commands
cat > /etc/sudoers.d/tapuser << EOF
tapuser ALL=(ALL) NOPASSWD: /sbin/brctl
tapuser ALL=(ALL) NOPASSWD: /sbin/ip
EOF
|
Add script for setting up the bridge VM# Setup tapuser for SSH from the CI nodes
useradd tapuser
mkdir -p /home/tapuser/.ssh
chown -R tapuser:tapuser /home/tapuser
chmod 700 /home/tapuser/.ssh
cat project-config-third-party/nodepool/scripts/vm-bridge-key.pub > /home/tapuser/.ssh/authorized_keys
chmod 600 /home/tapuser/.ssh/authorized_keys
# Install bridge utils
apt-get install bridge-utils
# Setup two bridges for L2 tunnels
brctl addbr nexusint1
brctl addbr nexusint2
# Setup GRE taps for the L2 tunnels to the node-provider
ip link add nexustap1 type gretap local 10.0.196.33 remote 10.0.196.3 key 1
ip link add nexustap2 type gretap local 10.0.196.33 remote 10.0.196.3 key 2
# Add GRE taps to the bridges
brctl addif nexusint1 nexustap1
brctl addif nexusint2 nexustap2
# Setup bridges and taps state up
ip link set nexustap1 up
ip link set nexustap2 up
ip link set nexusint1 up
ip link set nexusint2 up
# Setup sudo for the tapuser to access brctl and ip commands
cat > /etc/sudoers.d/tapuser << EOF
tapuser ALL=(ALL) NOPASSWD: /sbin/brctl
tapuser ALL=(ALL) NOPASSWD: /sbin/ip
EOF
|
<commit_before><commit_msg>Add script for setting up the bridge VM<commit_after># Setup tapuser for SSH from the CI nodes
useradd tapuser
mkdir -p /home/tapuser/.ssh
chown -R tapuser:tapuser /home/tapuser
chmod 700 /home/tapuser/.ssh
cat project-config-third-party/nodepool/scripts/vm-bridge-key.pub > authorized_keys
chmod 600 /home/tapuser/.ssh/authorized_keys
# Install bridge utils
apt-get install bridge-utils
# Setup two bridges for L2 tunnels
brctl addbr nexusint1
brctl addbr nexusint2
# Setup GRE taps for the L2 tunnels to the node-provider
ip link add nexustap1 type gretap local 10.0.196.33 remote 10.0.196.3 key 1
ip link add nexustap2 type gretap local 10.0.196.33 remote 10.0.196.3 key 2
# Add GRE taps to the bridges
brctl addif nexusint1 nexustap1
brctl addif nexusint2 nexustap2
# Setup bridges and taps state up
ip link set nexustap1 up
ip link set nexustap2 up
ip link set nexusint1 up
ip link set nexusint2 up
# Setup sudo for the tapuser to access brctl and ip commands
cat > /etc/sudoers.d/tapuser << EOF
tapuser ALL=(ALL) NOPASSWD: /sbin/brctl
tapuser ALL=(ALL) NOPASSWD: /sbin/ip
EOF
|
|
59626e34c7938fddeec140522dd2a592ba4f42ef
|
examples/simplex3_plot.py
|
examples/simplex3_plot.py
|
"""
Project and visualize a simplex grid on the 3-simplex.
"""
from __future__ import division
import dit
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def simplex3_vertices():
"""
Returns the vertices of the standard 3-simplex. Each column is a vertex.
"""
v = np.array([
[1, 0, 0],
[-1/3, +np.sqrt(8)/3, 0],
[-1/3, -np.sqrt(2)/3, +np.sqrt(2/3)],
[-1/3, -np.sqrt(2)/3, -np.sqrt(2/3)],
])
return v.transpose()
def plot_simplex_vertices(ax=None):
if ax is None:
f = plt.figure()
ax = f.add_subplot(111, projection='3d')
vertices = simplex3_vertices()
lines = np.array([
vertices[:,0],
vertices[:,1],
vertices[:,2],
vertices[:,0],
vertices[:,3],
vertices[:,1],
vertices[:,3],
vertices[:,2]
])
lines = lines.transpose()
out = ax.plot(lines[0], lines[1], lines[2])
ax.set_axis_off()
ax.set_aspect('equal')
return ax, out, vertices
def plot_simplex_grid(subdivisions, ax=None):
ax, out, vertices = plot_simplex_vertices(ax=ax)
grid = dit.simplex_grid(4, subdivisions)
projections = []
for dist in grid:
pmf = dist.pmf
proj = (pmf * vertices).sum(axis=1)
projections.append(proj)
projections = np.array(projections)
projections = projections.transpose()
out = ax.scatter(projections[0], projections[1], projections[2], s=10)
return ax, out, projections
if __name__ == '__main__':
plot_simplex_grid(9)
plt.show()
|
Add an example which plots a simplex_grid in 3D.
|
Add an example which plots a simplex_grid in 3D.
|
Python
|
bsd-3-clause
|
chebee7i/dit,chebee7i/dit,chebee7i/dit,chebee7i/dit
|
Add an example which plots a simplex_grid in 3D.
|
"""
Project and visualize a simplex grid on the 3-simplex.
"""
from __future__ import division
import dit
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def simplex3_vertices():
"""
Returns the vertices of the standard 3-simplex. Each column is a vertex.
"""
v = np.array([
[1, 0, 0],
[-1/3, +np.sqrt(8)/3, 0],
[-1/3, -np.sqrt(2)/3, +np.sqrt(2/3)],
[-1/3, -np.sqrt(2)/3, -np.sqrt(2/3)],
])
return v.transpose()
def plot_simplex_vertices(ax=None):
if ax is None:
f = plt.figure()
ax = f.add_subplot(111, projection='3d')
vertices = simplex3_vertices()
lines = np.array([
vertices[:,0],
vertices[:,1],
vertices[:,2],
vertices[:,0],
vertices[:,3],
vertices[:,1],
vertices[:,3],
vertices[:,2]
])
lines = lines.transpose()
out = ax.plot(lines[0], lines[1], lines[2])
ax.set_axis_off()
ax.set_aspect('equal')
return ax, out, vertices
def plot_simplex_grid(subdivisions, ax=None):
ax, out, vertices = plot_simplex_vertices(ax=ax)
grid = dit.simplex_grid(4, subdivisions)
projections = []
for dist in grid:
pmf = dist.pmf
proj = (pmf * vertices).sum(axis=1)
projections.append(proj)
projections = np.array(projections)
projections = projections.transpose()
out = ax.scatter(projections[0], projections[1], projections[2], s=10)
return ax, out, projections
if __name__ == '__main__':
plot_simplex_grid(9)
plt.show()
|
<commit_before><commit_msg>Add an example which plots a simplex_grid in 3D.<commit_after>
|
"""
Project and visualize a simplex grid on the 3-simplex.
"""
from __future__ import division
import dit
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def simplex3_vertices():
"""
Returns the vertices of the standard 3-simplex. Each column is a vertex.
"""
v = np.array([
[1, 0, 0],
[-1/3, +np.sqrt(8)/3, 0],
[-1/3, -np.sqrt(2)/3, +np.sqrt(2/3)],
[-1/3, -np.sqrt(2)/3, -np.sqrt(2/3)],
])
return v.transpose()
def plot_simplex_vertices(ax=None):
if ax is None:
f = plt.figure()
ax = f.add_subplot(111, projection='3d')
vertices = simplex3_vertices()
lines = np.array([
vertices[:,0],
vertices[:,1],
vertices[:,2],
vertices[:,0],
vertices[:,3],
vertices[:,1],
vertices[:,3],
vertices[:,2]
])
lines = lines.transpose()
out = ax.plot(lines[0], lines[1], lines[2])
ax.set_axis_off()
ax.set_aspect('equal')
return ax, out, vertices
def plot_simplex_grid(subdivisions, ax=None):
ax, out, vertices = plot_simplex_vertices(ax=ax)
grid = dit.simplex_grid(4, subdivisions)
projections = []
for dist in grid:
pmf = dist.pmf
proj = (pmf * vertices).sum(axis=1)
projections.append(proj)
projections = np.array(projections)
projections = projections.transpose()
out = ax.scatter(projections[0], projections[1], projections[2], s=10)
return ax, out, projections
if __name__ == '__main__':
plot_simplex_grid(9)
plt.show()
|
Add an example which plots a simplex_grid in 3D."""
Project and visualize a simplex grid on the 3-simplex.
"""
from __future__ import division
import dit
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def simplex3_vertices():
"""
Returns the vertices of the standard 3-simplex. Each column is a vertex.
"""
v = np.array([
[1, 0, 0],
[-1/3, +np.sqrt(8)/3, 0],
[-1/3, -np.sqrt(2)/3, +np.sqrt(2/3)],
[-1/3, -np.sqrt(2)/3, -np.sqrt(2/3)],
])
return v.transpose()
def plot_simplex_vertices(ax=None):
if ax is None:
f = plt.figure()
ax = f.add_subplot(111, projection='3d')
vertices = simplex3_vertices()
lines = np.array([
vertices[:,0],
vertices[:,1],
vertices[:,2],
vertices[:,0],
vertices[:,3],
vertices[:,1],
vertices[:,3],
vertices[:,2]
])
lines = lines.transpose()
out = ax.plot(lines[0], lines[1], lines[2])
ax.set_axis_off()
ax.set_aspect('equal')
return ax, out, vertices
def plot_simplex_grid(subdivisions, ax=None):
ax, out, vertices = plot_simplex_vertices(ax=ax)
grid = dit.simplex_grid(4, subdivisions)
projections = []
for dist in grid:
pmf = dist.pmf
proj = (pmf * vertices).sum(axis=1)
projections.append(proj)
projections = np.array(projections)
projections = projections.transpose()
out = ax.scatter(projections[0], projections[1], projections[2], s=10)
return ax, out, projections
if __name__ == '__main__':
plot_simplex_grid(9)
plt.show()
|
<commit_before><commit_msg>Add an example which plots a simplex_grid in 3D.<commit_after>"""
Project and visualize a simplex grid on the 3-simplex.
"""
from __future__ import division
import dit
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def simplex3_vertices():
"""
Returns the vertices of the standard 3-simplex. Each column is a vertex.
"""
v = np.array([
[1, 0, 0],
[-1/3, +np.sqrt(8)/3, 0],
[-1/3, -np.sqrt(2)/3, +np.sqrt(2/3)],
[-1/3, -np.sqrt(2)/3, -np.sqrt(2/3)],
])
return v.transpose()
def plot_simplex_vertices(ax=None):
if ax is None:
f = plt.figure()
ax = f.add_subplot(111, projection='3d')
vertices = simplex3_vertices()
lines = np.array([
vertices[:,0],
vertices[:,1],
vertices[:,2],
vertices[:,0],
vertices[:,3],
vertices[:,1],
vertices[:,3],
vertices[:,2]
])
lines = lines.transpose()
out = ax.plot(lines[0], lines[1], lines[2])
ax.set_axis_off()
ax.set_aspect('equal')
return ax, out, vertices
def plot_simplex_grid(subdivisions, ax=None):
ax, out, vertices = plot_simplex_vertices(ax=ax)
grid = dit.simplex_grid(4, subdivisions)
projections = []
for dist in grid:
pmf = dist.pmf
proj = (pmf * vertices).sum(axis=1)
projections.append(proj)
projections = np.array(projections)
projections = projections.transpose()
out = ax.scatter(projections[0], projections[1], projections[2], s=10)
return ax, out, projections
if __name__ == '__main__':
plot_simplex_grid(9)
plt.show()
|
|
1c55fdc21062a7090a76fc7316c93d6ed4647d53
|
tests/rules_tests/grammarManipulation_tests/RemoveTest.py
|
tests/rules_tests/grammarManipulation_tests/RemoveTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule as _R, Grammar
from rules_tests.grammar import *
class RemoveTest(TestCase):
def __init__(self, *args):
super().__init__(*args)
self.g = Grammar()
def setUp(self):
g = Grammar()
g.add_term([0, 1, 2, 'a', 'b', 'c'])
g.add_nonterm([NFirst, NSecond, NThird, NFourth])
self.g = g
if __name__ == '__main__':
main()
|
Add file for rule remove tests
|
Add file for rule remove tests
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add file for rule remove tests
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule as _R, Grammar
from rules_tests.grammar import *
class RemoveTest(TestCase):
def __init__(self, *args):
super().__init__(*args)
self.g = Grammar()
def setUp(self):
g = Grammar()
g.add_term([0, 1, 2, 'a', 'b', 'c'])
g.add_nonterm([NFirst, NSecond, NThird, NFourth])
self.g = g
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for rule remove tests<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule as _R, Grammar
from rules_tests.grammar import *
class RemoveTest(TestCase):
def __init__(self, *args):
super().__init__(*args)
self.g = Grammar()
def setUp(self):
g = Grammar()
g.add_term([0, 1, 2, 'a', 'b', 'c'])
g.add_nonterm([NFirst, NSecond, NThird, NFourth])
self.g = g
if __name__ == '__main__':
main()
|
Add file for rule remove tests#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule as _R, Grammar
from rules_tests.grammar import *
class RemoveTest(TestCase):
def __init__(self, *args):
super().__init__(*args)
self.g = Grammar()
def setUp(self):
g = Grammar()
g.add_term([0, 1, 2, 'a', 'b', 'c'])
g.add_nonterm([NFirst, NSecond, NThird, NFourth])
self.g = g
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for rule remove tests<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule as _R, Grammar
from rules_tests.grammar import *
class RemoveTest(TestCase):
def __init__(self, *args):
super().__init__(*args)
self.g = Grammar()
def setUp(self):
g = Grammar()
g.add_term([0, 1, 2, 'a', 'b', 'c'])
g.add_nonterm([NFirst, NSecond, NThird, NFourth])
self.g = g
if __name__ == '__main__':
main()
|
|
f22d1e7d39a282f12f92140b552c3d6751135ae1
|
simples3/gae.py
|
simples3/gae.py
|
"""Compatibility layer for Google App Engine
Use as you would normally do with :mod:`simples3`, only instead of
:class:`simples3.S3Bucket`, use :class:`simples3.gae.AppEngineS3Bucket`.
"""
import urllib2
from StringIO import StringIO
from urllib import addinfourl
from google.appengine.api import urlfetch
from simples3.bucket import S3Bucket
class _FakeDict(list):
def iteritems(self):
return self
def _http_open(req):
resp = urlfetch.fetch(req.get_full_url(),
payload=req.get_data(),
method=req.get_method(),
headers=_FakeDict(req.header_items()))
fp = StringIO(resp.content)
rv = addinfourl(fp, resp.headers, req.get_full_url())
rv.code = resp.status_code
rv.msg = "?"
return rv
class UrlFetchHTTPHandler(urllib2.HTTPHandler):
def http_open(self, req):
return _http_open(req)
class UrlFetchHTTPSHandler(urllib2.HTTPSHandler):
def https_open(self, req):
return _http_open(req)
class AppEngineS3Bucket(S3Bucket):
@classmethod
def build_opener(cls):
# urllib likes to import ctypes. Why? Because on OS X, it uses it to
# find proxy configurations. While that is nice and all (and a huge
# f---ing kludge), it makes the GAE development server bork because the
# platform makes urllib import ctypes, and that's not permissible on
# App Engine (can't load dynamic libraries at all.)
#
# Giving urllib2 a ProxyHandler without any proxies avoids this look-up
# trickery, and so is beneficial to our ends and goals in this pickle
# of a situation.
return urllib2.build_opener(UrlFetchHTTPHandler, UrlFetchHTTPSHandler,
urllib2.ProxyHandler(proxies={}))
|
Add Google App Engine helper module
|
Add Google App Engine helper module
|
Python
|
bsd-2-clause
|
sirkamran32/simples3,leo23/simples3,lericson/simples3
|
Add Google App Engine helper module
|
"""Compatibility layer for Google App Engine
Use as you would normally do with :mod:`simples3`, only instead of
:class:`simples3.S3Bucket`, use :class:`simples3.gae.AppEngineS3Bucket`.
"""
import urllib2
from StringIO import StringIO
from urllib import addinfourl
from google.appengine.api import urlfetch
from simples3.bucket import S3Bucket
class _FakeDict(list):
def iteritems(self):
return self
def _http_open(req):
resp = urlfetch.fetch(req.get_full_url(),
payload=req.get_data(),
method=req.get_method(),
headers=_FakeDict(req.header_items()))
fp = StringIO(resp.content)
rv = addinfourl(fp, resp.headers, req.get_full_url())
rv.code = resp.status_code
rv.msg = "?"
return rv
class UrlFetchHTTPHandler(urllib2.HTTPHandler):
def http_open(self, req):
return _http_open(req)
class UrlFetchHTTPSHandler(urllib2.HTTPSHandler):
def https_open(self, req):
return _http_open(req)
class AppEngineS3Bucket(S3Bucket):
@classmethod
def build_opener(cls):
# urllib likes to import ctypes. Why? Because on OS X, it uses it to
# find proxy configurations. While that is nice and all (and a huge
# f---ing kludge), it makes the GAE development server bork because the
# platform makes urllib import ctypes, and that's not permissible on
# App Engine (can't load dynamic libraries at all.)
#
# Giving urllib2 a ProxyHandler without any proxies avoids this look-up
# trickery, and so is beneficial to our ends and goals in this pickle
# of a situation.
return urllib2.build_opener(UrlFetchHTTPHandler, UrlFetchHTTPSHandler,
urllib2.ProxyHandler(proxies={}))
|
<commit_before><commit_msg>Add Google App Engine helper module<commit_after>
|
"""Compatibility layer for Google App Engine
Use as you would normally do with :mod:`simples3`, only instead of
:class:`simples3.S3Bucket`, use :class:`simples3.gae.AppEngineS3Bucket`.
"""
import urllib2
from StringIO import StringIO
from urllib import addinfourl
from google.appengine.api import urlfetch
from simples3.bucket import S3Bucket
class _FakeDict(list):
def iteritems(self):
return self
def _http_open(req):
resp = urlfetch.fetch(req.get_full_url(),
payload=req.get_data(),
method=req.get_method(),
headers=_FakeDict(req.header_items()))
fp = StringIO(resp.content)
rv = addinfourl(fp, resp.headers, req.get_full_url())
rv.code = resp.status_code
rv.msg = "?"
return rv
class UrlFetchHTTPHandler(urllib2.HTTPHandler):
def http_open(self, req):
return _http_open(req)
class UrlFetchHTTPSHandler(urllib2.HTTPSHandler):
def https_open(self, req):
return _http_open(req)
class AppEngineS3Bucket(S3Bucket):
@classmethod
def build_opener(cls):
# urllib likes to import ctypes. Why? Because on OS X, it uses it to
# find proxy configurations. While that is nice and all (and a huge
# f---ing kludge), it makes the GAE development server bork because the
# platform makes urllib import ctypes, and that's not permissible on
# App Engine (can't load dynamic libraries at all.)
#
# Giving urllib2 a ProxyHandler without any proxies avoids this look-up
# trickery, and so is beneficial to our ends and goals in this pickle
# of a situation.
return urllib2.build_opener(UrlFetchHTTPHandler, UrlFetchHTTPSHandler,
urllib2.ProxyHandler(proxies={}))
|
Add Google App Engine helper module"""Compatibility layer for Google App Engine
Use as you would normally do with :mod:`simples3`, only instead of
:class:`simples3.S3Bucket`, use :class:`simples3.gae.AppEngineS3Bucket`.
"""
import urllib2
from StringIO import StringIO
from urllib import addinfourl
from google.appengine.api import urlfetch
from simples3.bucket import S3Bucket
class _FakeDict(list):
def iteritems(self):
return self
def _http_open(req):
resp = urlfetch.fetch(req.get_full_url(),
payload=req.get_data(),
method=req.get_method(),
headers=_FakeDict(req.header_items()))
fp = StringIO(resp.content)
rv = addinfourl(fp, resp.headers, req.get_full_url())
rv.code = resp.status_code
rv.msg = "?"
return rv
class UrlFetchHTTPHandler(urllib2.HTTPHandler):
def http_open(self, req):
return _http_open(req)
class UrlFetchHTTPSHandler(urllib2.HTTPSHandler):
def https_open(self, req):
return _http_open(req)
class AppEngineS3Bucket(S3Bucket):
@classmethod
def build_opener(cls):
# urllib likes to import ctypes. Why? Because on OS X, it uses it to
# find proxy configurations. While that is nice and all (and a huge
# f---ing kludge), it makes the GAE development server bork because the
# platform makes urllib import ctypes, and that's not permissible on
# App Engine (can't load dynamic libraries at all.)
#
# Giving urllib2 a ProxyHandler without any proxies avoids this look-up
# trickery, and so is beneficial to our ends and goals in this pickle
# of a situation.
return urllib2.build_opener(UrlFetchHTTPHandler, UrlFetchHTTPSHandler,
urllib2.ProxyHandler(proxies={}))
|
<commit_before><commit_msg>Add Google App Engine helper module<commit_after>"""Compatibility layer for Google App Engine
Use as you would normally do with :mod:`simples3`, only instead of
:class:`simples3.S3Bucket`, use :class:`simples3.gae.AppEngineS3Bucket`.
"""
import urllib2
from StringIO import StringIO
from urllib import addinfourl
from google.appengine.api import urlfetch
from simples3.bucket import S3Bucket
class _FakeDict(list):
def iteritems(self):
return self
def _http_open(req):
resp = urlfetch.fetch(req.get_full_url(),
payload=req.get_data(),
method=req.get_method(),
headers=_FakeDict(req.header_items()))
fp = StringIO(resp.content)
rv = addinfourl(fp, resp.headers, req.get_full_url())
rv.code = resp.status_code
rv.msg = "?"
return rv
class UrlFetchHTTPHandler(urllib2.HTTPHandler):
def http_open(self, req):
return _http_open(req)
class UrlFetchHTTPSHandler(urllib2.HTTPSHandler):
def https_open(self, req):
return _http_open(req)
class AppEngineS3Bucket(S3Bucket):
@classmethod
def build_opener(cls):
# urllib likes to import ctypes. Why? Because on OS X, it uses it to
# find proxy configurations. While that is nice and all (and a huge
# f---ing kludge), it makes the GAE development server bork because the
# platform makes urllib import ctypes, and that's not permissible on
# App Engine (can't load dynamic libraries at all.)
#
# Giving urllib2 a ProxyHandler without any proxies avoids this look-up
# trickery, and so is beneficial to our ends and goals in this pickle
# of a situation.
return urllib2.build_opener(UrlFetchHTTPHandler, UrlFetchHTTPSHandler,
urllib2.ProxyHandler(proxies={}))
|
|
370d0604cf3abebc56f0a39efcf3cc5c3d908808
|
nex_profile.py
|
nex_profile.py
|
import cProfile as prof
import os.path as opath
from nex.nex import run_file
from nex.utils import get_default_font_paths
from nex.box_writer import write_to_dvi_file
dir_path = opath.dirname(opath.realpath(__file__))
font_search_paths = get_default_font_paths() + [
dir_path,
opath.join(dir_path, 'fonts'),
]
def t():
state = run_file("tex/test.tex", font_search_paths)
write_to_dvi_file(state, 'prof_out.dvi', write_pdf=True)
prof.run('t()', 'prof_stats')
|
Add little script used for profiling
|
Add little script used for profiling
|
Python
|
mit
|
eddiejessup/nex
|
Add little script used for profiling
|
import cProfile as prof
import os.path as opath
from nex.nex import run_file
from nex.utils import get_default_font_paths
from nex.box_writer import write_to_dvi_file
dir_path = opath.dirname(opath.realpath(__file__))
font_search_paths = get_default_font_paths() + [
dir_path,
opath.join(dir_path, 'fonts'),
]
def t():
state = run_file("tex/test.tex", font_search_paths)
write_to_dvi_file(state, 'prof_out.dvi', write_pdf=True)
prof.run('t()', 'prof_stats')
|
<commit_before><commit_msg>Add little script used for profiling<commit_after>
|
import cProfile as prof
import os.path as opath
from nex.nex import run_file
from nex.utils import get_default_font_paths
from nex.box_writer import write_to_dvi_file
dir_path = opath.dirname(opath.realpath(__file__))
font_search_paths = get_default_font_paths() + [
dir_path,
opath.join(dir_path, 'fonts'),
]
def t():
state = run_file("tex/test.tex", font_search_paths)
write_to_dvi_file(state, 'prof_out.dvi', write_pdf=True)
prof.run('t()', 'prof_stats')
|
Add little script used for profilingimport cProfile as prof
import os.path as opath
from nex.nex import run_file
from nex.utils import get_default_font_paths
from nex.box_writer import write_to_dvi_file
dir_path = opath.dirname(opath.realpath(__file__))
font_search_paths = get_default_font_paths() + [
dir_path,
opath.join(dir_path, 'fonts'),
]
def t():
state = run_file("tex/test.tex", font_search_paths)
write_to_dvi_file(state, 'prof_out.dvi', write_pdf=True)
prof.run('t()', 'prof_stats')
|
<commit_before><commit_msg>Add little script used for profiling<commit_after>import cProfile as prof
import os.path as opath
from nex.nex import run_file
from nex.utils import get_default_font_paths
from nex.box_writer import write_to_dvi_file
dir_path = opath.dirname(opath.realpath(__file__))
font_search_paths = get_default_font_paths() + [
dir_path,
opath.join(dir_path, 'fonts'),
]
def t():
state = run_file("tex/test.tex", font_search_paths)
write_to_dvi_file(state, 'prof_out.dvi', write_pdf=True)
prof.run('t()', 'prof_stats')
|
|
2ed5e9386678cbf03626dce943eab54b5f082cfd
|
storage/mongo_storage.py
|
storage/mongo_storage.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/
'''
Storage module to interact with MongoDB.
Provides a MongoStorage helper class with the following
functions:
setup: initialize the Mongo client connection
store: takes in a report dictionary and posts it to
mongo instance. Returns a list of report id's.
get_report: Given a report_id (a sha256 hash), return
the report.
delete: Given a report_id (a sha256 hash), delete the
specified report.
'''
from storage import Storage
import json
from uuid import uuid4
from pymongo import MongoClient
class MongoStorage(Storage):
'''
Subclass of Storage. Allows user to interact
with backend Mongo database
'''
DEFAULTCONF = {
'ENABLED': False,
'host': 'localhost',
'port': 27017,
'database': 'multiscanner_reports',
'collection': 'reports',
}
def setup(self):
self.host = self.config['host']
self.port = self.config['port']
self.client = MongoClient(
host=self.host,
port=self.port
)
self.database = getattr(self.client, self.config['database'])
self.collection = getattr(self.database, self.config['collection'])
return True
def store(self, report):
report_id_list = []
for filename in report:
report[filename]['filename'] = filename
try:
report_id = report[filename]['SHA256']
except KeyError:
report_id = uuid4()
report_id_list.append(report_id)
self.collection.update(
{'_id': report_id},
report[filename],
True
)
return report_id_list
def get_report(self, report_id):
result = self.collection.find({'_id': report_id})
if result.count() == 0:
return json.dumps({})
return json.dumps(result[0])
def delete(self, report_id):
result = self.collection.delete_one({'_id': report_id})
if result.deleted_count == 0:
return False
return True
|
Add storage module for mongo DB
|
Add storage module for mongo DB
|
Python
|
mpl-2.0
|
mitre/multiscanner,MITRECND/multiscanner,jmlong1027/multiscanner,mitre/multiscanner,mitre/multiscanner,jmlong1027/multiscanner,awest1339/multiscanner,MITRECND/multiscanner,awest1339/multiscanner,jmlong1027/multiscanner,awest1339/multiscanner,awest1339/multiscanner,jmlong1027/multiscanner
|
Add storage module for mongo DB
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/
'''
Storage module to interact with MongoDB.
Provides a MongoStorage helper class with the following
functions:
setup: initialize the Mongo client connection
store: takes in a report dictionary and posts it to
mongo instance. Returns a list of report id's.
get_report: Given a report_id (a sha256 hash), return
the report.
delete: Given a report_id (a sha256 hash), delete the
specified report.
'''
from storage import Storage
import json
from uuid import uuid4
from pymongo import MongoClient
class MongoStorage(Storage):
'''
Subclass of Storage. Allows user to interact
with backend Mongo database
'''
DEFAULTCONF = {
'ENABLED': False,
'host': 'localhost',
'port': 27017,
'database': 'multiscanner_reports',
'collection': 'reports',
}
def setup(self):
self.host = self.config['host']
self.port = self.config['port']
self.client = MongoClient(
host=self.host,
port=self.port
)
self.database = getattr(self.client, self.config['database'])
self.collection = getattr(self.database, self.config['collection'])
return True
def store(self, report):
report_id_list = []
for filename in report:
report[filename]['filename'] = filename
try:
report_id = report[filename]['SHA256']
except KeyError:
report_id = uuid4()
report_id_list.append(report_id)
self.collection.update(
{'_id': report_id},
report[filename],
True
)
return report_id_list
def get_report(self, report_id):
result = self.collection.find({'_id': report_id})
if result.count() == 0:
return json.dumps({})
return json.dumps(result[0])
def delete(self, report_id):
result = self.collection.delete_one({'_id': report_id})
if result.deleted_count == 0:
return False
return True
|
<commit_before><commit_msg>Add storage module for mongo DB<commit_after>
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/
'''
Storage module to interact with MongoDB.
Provides a MongoStorage helper class with the following
functions:
setup: initialize the Mongo client connection
store: takes in a report dictionary and posts it to
mongo instance. Returns a list of report id's.
get_report: Given a report_id (a sha256 hash), return
the report.
delete: Given a report_id (a sha256 hash), delete the
specified report.
'''
from storage import Storage
import json
from uuid import uuid4
from pymongo import MongoClient
class MongoStorage(Storage):
'''
Subclass of Storage. Allows user to interact
with backend Mongo database
'''
DEFAULTCONF = {
'ENABLED': False,
'host': 'localhost',
'port': 27017,
'database': 'multiscanner_reports',
'collection': 'reports',
}
def setup(self):
self.host = self.config['host']
self.port = self.config['port']
self.client = MongoClient(
host=self.host,
port=self.port
)
self.database = getattr(self.client, self.config['database'])
self.collection = getattr(self.database, self.config['collection'])
return True
def store(self, report):
report_id_list = []
for filename in report:
report[filename]['filename'] = filename
try:
report_id = report[filename]['SHA256']
except KeyError:
report_id = uuid4()
report_id_list.append(report_id)
self.collection.update(
{'_id': report_id},
report[filename],
True
)
return report_id_list
def get_report(self, report_id):
result = self.collection.find({'_id': report_id})
if result.count() == 0:
return json.dumps({})
return json.dumps(result[0])
def delete(self, report_id):
result = self.collection.delete_one({'_id': report_id})
if result.deleted_count == 0:
return False
return True
|
Add storage module for mongo DB# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/
'''
Storage module to interact with MongoDB.
Provides a MongoStorage helper class with the following
functions:
setup: initialize the Mongo client connection
store: takes in a report dictionary and posts it to
mongo instance. Returns a list of report id's.
get_report: Given a report_id (a sha256 hash), return
the report.
delete: Given a report_id (a sha256 hash), delete the
specified report.
'''
from storage import Storage
import json
from uuid import uuid4
from pymongo import MongoClient
class MongoStorage(Storage):
'''
Subclass of Storage. Allows user to interact
with backend Mongo database
'''
DEFAULTCONF = {
'ENABLED': False,
'host': 'localhost',
'port': 27017,
'database': 'multiscanner_reports',
'collection': 'reports',
}
def setup(self):
self.host = self.config['host']
self.port = self.config['port']
self.client = MongoClient(
host=self.host,
port=self.port
)
self.database = getattr(self.client, self.config['database'])
self.collection = getattr(self.database, self.config['collection'])
return True
def store(self, report):
report_id_list = []
for filename in report:
report[filename]['filename'] = filename
try:
report_id = report[filename]['SHA256']
except KeyError:
report_id = uuid4()
report_id_list.append(report_id)
self.collection.update(
{'_id': report_id},
report[filename],
True
)
return report_id_list
def get_report(self, report_id):
result = self.collection.find({'_id': report_id})
if result.count() == 0:
return json.dumps({})
return json.dumps(result[0])
def delete(self, report_id):
result = self.collection.delete_one({'_id': report_id})
if result.deleted_count == 0:
return False
return True
|
<commit_before><commit_msg>Add storage module for mongo DB<commit_after># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/
'''
Storage module to interact with MongoDB.
Provides a MongoStorage helper class with the following
functions:
setup: initialize the Mongo client connection
store: takes in a report dictionary and posts it to
mongo instance. Returns a list of report id's.
get_report: Given a report_id (a sha256 hash), return
the report.
delete: Given a report_id (a sha256 hash), delete the
specified report.
'''
from storage import Storage
import json
from uuid import uuid4
from pymongo import MongoClient
class MongoStorage(Storage):
'''
Subclass of Storage. Allows user to interact
with backend Mongo database
'''
DEFAULTCONF = {
'ENABLED': False,
'host': 'localhost',
'port': 27017,
'database': 'multiscanner_reports',
'collection': 'reports',
}
def setup(self):
self.host = self.config['host']
self.port = self.config['port']
self.client = MongoClient(
host=self.host,
port=self.port
)
self.database = getattr(self.client, self.config['database'])
self.collection = getattr(self.database, self.config['collection'])
return True
def store(self, report):
report_id_list = []
for filename in report:
report[filename]['filename'] = filename
try:
report_id = report[filename]['SHA256']
except KeyError:
report_id = uuid4()
report_id_list.append(report_id)
self.collection.update(
{'_id': report_id},
report[filename],
True
)
return report_id_list
def get_report(self, report_id):
result = self.collection.find({'_id': report_id})
if result.count() == 0:
return json.dumps({})
return json.dumps(result[0])
def delete(self, report_id):
result = self.collection.delete_one({'_id': report_id})
if result.deleted_count == 0:
return False
return True
|
|
9c5e65b844ef8ac0b6a864a6edb83a827506809c
|
tests/test_signatures.py
|
tests/test_signatures.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Camille Scott, 2019
# File : test_signatures.py
# License: MIT
# Author : Camille Scott <camille.scott.w@gmail.com>
# Date : 16.10.2019
import pytest
from boink.signatures import SourmashSignature
from .utils import *
import screed
def test_sourmash_signature(datadir, ksize):
import sourmash
rfile = datadir('random-20-a.fa')
boink_sig = SourmashSignature.Signature.build(10000, 31, False, 42, 0)
sourmash_sig = sourmash.MinHash(10000, 31)
processor = SourmashSignature.Processor.build(boink_sig)
processor.process(rfile)
for record in screed.open(rfile):
sourmash_sig.add_sequence(record.sequence)
boink_mh = boink_sig.to_sourmash()
assert boink_mh.similarity(sourmash_sig) == 1.0
|
Test that sourmash signature processor results match sourmash library
|
Test that sourmash signature processor results match sourmash library
|
Python
|
mit
|
camillescott/boink,camillescott/boink,camillescott/boink,camillescott/boink
|
Test that sourmash signature processor results match sourmash library
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Camille Scott, 2019
# File : test_signatures.py
# License: MIT
# Author : Camille Scott <camille.scott.w@gmail.com>
# Date : 16.10.2019
import pytest
from boink.signatures import SourmashSignature
from .utils import *
import screed
def test_sourmash_signature(datadir, ksize):
import sourmash
rfile = datadir('random-20-a.fa')
boink_sig = SourmashSignature.Signature.build(10000, 31, False, 42, 0)
sourmash_sig = sourmash.MinHash(10000, 31)
processor = SourmashSignature.Processor.build(boink_sig)
processor.process(rfile)
for record in screed.open(rfile):
sourmash_sig.add_sequence(record.sequence)
boink_mh = boink_sig.to_sourmash()
assert boink_mh.similarity(sourmash_sig) == 1.0
|
<commit_before><commit_msg>Test that sourmash signature processor results match sourmash library<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Camille Scott, 2019
# File : test_signatures.py
# License: MIT
# Author : Camille Scott <camille.scott.w@gmail.com>
# Date : 16.10.2019
import pytest
from boink.signatures import SourmashSignature
from .utils import *
import screed
def test_sourmash_signature(datadir, ksize):
import sourmash
rfile = datadir('random-20-a.fa')
boink_sig = SourmashSignature.Signature.build(10000, 31, False, 42, 0)
sourmash_sig = sourmash.MinHash(10000, 31)
processor = SourmashSignature.Processor.build(boink_sig)
processor.process(rfile)
for record in screed.open(rfile):
sourmash_sig.add_sequence(record.sequence)
boink_mh = boink_sig.to_sourmash()
assert boink_mh.similarity(sourmash_sig) == 1.0
|
Test that sourmash signature processor results match sourmash library#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Camille Scott, 2019
# File : test_signatures.py
# License: MIT
# Author : Camille Scott <camille.scott.w@gmail.com>
# Date : 16.10.2019
import pytest
from boink.signatures import SourmashSignature
from .utils import *
import screed
def test_sourmash_signature(datadir, ksize):
import sourmash
rfile = datadir('random-20-a.fa')
boink_sig = SourmashSignature.Signature.build(10000, 31, False, 42, 0)
sourmash_sig = sourmash.MinHash(10000, 31)
processor = SourmashSignature.Processor.build(boink_sig)
processor.process(rfile)
for record in screed.open(rfile):
sourmash_sig.add_sequence(record.sequence)
boink_mh = boink_sig.to_sourmash()
assert boink_mh.similarity(sourmash_sig) == 1.0
|
<commit_before><commit_msg>Test that sourmash signature processor results match sourmash library<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) Camille Scott, 2019
# File : test_signatures.py
# License: MIT
# Author : Camille Scott <camille.scott.w@gmail.com>
# Date : 16.10.2019
import pytest
from boink.signatures import SourmashSignature
from .utils import *
import screed
def test_sourmash_signature(datadir, ksize):
import sourmash
rfile = datadir('random-20-a.fa')
boink_sig = SourmashSignature.Signature.build(10000, 31, False, 42, 0)
sourmash_sig = sourmash.MinHash(10000, 31)
processor = SourmashSignature.Processor.build(boink_sig)
processor.process(rfile)
for record in screed.open(rfile):
sourmash_sig.add_sequence(record.sequence)
boink_mh = boink_sig.to_sourmash()
assert boink_mh.similarity(sourmash_sig) == 1.0
|
|
1ff5402c960ac23a9339eaac17cd8e002118be9e
|
bluebottle/payments/tests/test_adapters.py
|
bluebottle/payments/tests/test_adapters.py
|
from django.test.utils import override_settings
from moneyed import Money
from bluebottle.payments.adapters import BasePaymentAdapter
from bluebottle.test.factory_models.payments import OrderPaymentFactory, PaymentFactory
from bluebottle.test.utils import BluebottleTestCase
@override_settings(MERCHANT_ACCOUNTS=[{
'merchant': 'docdata',
'merchant_password': 'eur_password',
'currency': 'EUR',
'merchant_name': 'eur_username'
}, {
'merchant': 'docdata',
'merchant_password': 'usd_password',
'currency': 'USD',
'merchant_name': 'usd_username'
}])
class PaymentAdapterTestCase(BluebottleTestCase):
def setUp(self):
self.order_payment = OrderPaymentFactory.create(
payment_method='docdata',
amount=Money(200, 'EUR')
)
PaymentFactory.create(order_payment=self.order_payment)
self.adapter = BasePaymentAdapter(self.order_payment)
def test_credentials(self):
credentials = self.adapter.credentials
self.assertEqual('EUR', credentials['currency'])
def test_credentials_usd(self):
self.order_payment.amount = Money(100, 'USD')
self.order_payment.save()
credentials = self.adapter.credentials
self.assertEqual('USD', credentials['currency'])
|
Add tests for payment adapter credentials
|
Add tests for payment adapter credentials
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Add tests for payment adapter credentials
|
from django.test.utils import override_settings
from moneyed import Money
from bluebottle.payments.adapters import BasePaymentAdapter
from bluebottle.test.factory_models.payments import OrderPaymentFactory, PaymentFactory
from bluebottle.test.utils import BluebottleTestCase
@override_settings(MERCHANT_ACCOUNTS=[{
'merchant': 'docdata',
'merchant_password': 'eur_password',
'currency': 'EUR',
'merchant_name': 'eur_username'
}, {
'merchant': 'docdata',
'merchant_password': 'usd_password',
'currency': 'USD',
'merchant_name': 'usd_username'
}])
class PaymentAdapterTestCase(BluebottleTestCase):
def setUp(self):
self.order_payment = OrderPaymentFactory.create(
payment_method='docdata',
amount=Money(200, 'EUR')
)
PaymentFactory.create(order_payment=self.order_payment)
self.adapter = BasePaymentAdapter(self.order_payment)
def test_credentials(self):
credentials = self.adapter.credentials
self.assertEqual('EUR', credentials['currency'])
def test_credentials_usd(self):
self.order_payment.amount = Money(100, 'USD')
self.order_payment.save()
credentials = self.adapter.credentials
self.assertEqual('USD', credentials['currency'])
|
<commit_before><commit_msg>Add tests for payment adapter credentials<commit_after>
|
from django.test.utils import override_settings
from moneyed import Money
from bluebottle.payments.adapters import BasePaymentAdapter
from bluebottle.test.factory_models.payments import OrderPaymentFactory, PaymentFactory
from bluebottle.test.utils import BluebottleTestCase
@override_settings(MERCHANT_ACCOUNTS=[{
'merchant': 'docdata',
'merchant_password': 'eur_password',
'currency': 'EUR',
'merchant_name': 'eur_username'
}, {
'merchant': 'docdata',
'merchant_password': 'usd_password',
'currency': 'USD',
'merchant_name': 'usd_username'
}])
class PaymentAdapterTestCase(BluebottleTestCase):
def setUp(self):
self.order_payment = OrderPaymentFactory.create(
payment_method='docdata',
amount=Money(200, 'EUR')
)
PaymentFactory.create(order_payment=self.order_payment)
self.adapter = BasePaymentAdapter(self.order_payment)
def test_credentials(self):
credentials = self.adapter.credentials
self.assertEqual('EUR', credentials['currency'])
def test_credentials_usd(self):
self.order_payment.amount = Money(100, 'USD')
self.order_payment.save()
credentials = self.adapter.credentials
self.assertEqual('USD', credentials['currency'])
|
Add tests for payment adapter credentialsfrom django.test.utils import override_settings
from moneyed import Money
from bluebottle.payments.adapters import BasePaymentAdapter
from bluebottle.test.factory_models.payments import OrderPaymentFactory, PaymentFactory
from bluebottle.test.utils import BluebottleTestCase
@override_settings(MERCHANT_ACCOUNTS=[{
'merchant': 'docdata',
'merchant_password': 'eur_password',
'currency': 'EUR',
'merchant_name': 'eur_username'
}, {
'merchant': 'docdata',
'merchant_password': 'usd_password',
'currency': 'USD',
'merchant_name': 'usd_username'
}])
class PaymentAdapterTestCase(BluebottleTestCase):
def setUp(self):
self.order_payment = OrderPaymentFactory.create(
payment_method='docdata',
amount=Money(200, 'EUR')
)
PaymentFactory.create(order_payment=self.order_payment)
self.adapter = BasePaymentAdapter(self.order_payment)
def test_credentials(self):
credentials = self.adapter.credentials
self.assertEqual('EUR', credentials['currency'])
def test_credentials_usd(self):
self.order_payment.amount = Money(100, 'USD')
self.order_payment.save()
credentials = self.adapter.credentials
self.assertEqual('USD', credentials['currency'])
|
<commit_before><commit_msg>Add tests for payment adapter credentials<commit_after>from django.test.utils import override_settings
from moneyed import Money
from bluebottle.payments.adapters import BasePaymentAdapter
from bluebottle.test.factory_models.payments import OrderPaymentFactory, PaymentFactory
from bluebottle.test.utils import BluebottleTestCase
@override_settings(MERCHANT_ACCOUNTS=[{
'merchant': 'docdata',
'merchant_password': 'eur_password',
'currency': 'EUR',
'merchant_name': 'eur_username'
}, {
'merchant': 'docdata',
'merchant_password': 'usd_password',
'currency': 'USD',
'merchant_name': 'usd_username'
}])
class PaymentAdapterTestCase(BluebottleTestCase):
def setUp(self):
self.order_payment = OrderPaymentFactory.create(
payment_method='docdata',
amount=Money(200, 'EUR')
)
PaymentFactory.create(order_payment=self.order_payment)
self.adapter = BasePaymentAdapter(self.order_payment)
def test_credentials(self):
credentials = self.adapter.credentials
self.assertEqual('EUR', credentials['currency'])
def test_credentials_usd(self):
self.order_payment.amount = Money(100, 'USD')
self.order_payment.save()
credentials = self.adapter.credentials
self.assertEqual('USD', credentials['currency'])
|
|
5db2b7ce0573048077054a29857c4e46d44ca0b8
|
migrations/versions/280_switch_g7_framework_to_open.py
|
migrations/versions/280_switch_g7_framework_to_open.py
|
"""Add audit_events type, object and created_at indexes
Revision ID: 280_switch_g7_framework_to_open
Revises: 270_add_audit_events_indexes
Create Date: 2015-09-01 13:45:44.886576
"""
# revision identifiers, used by Alembic.
revision = '280_switch_g7_framework_to_open'
down_revision = '270_add_audit_events_indexes'
from alembic import op
def upgrade():
op.execute("UPDATE frameworks SET status='open' WHERE name='G-Cloud 7'")
def downgrade():
pass
|
Switch on G-Cloud 7 - status change to open
|
Switch on G-Cloud 7 - status change to open
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Switch on G-Cloud 7 - status change to open
|
"""Add audit_events type, object and created_at indexes
Revision ID: 280_switch_g7_framework_to_open
Revises: 270_add_audit_events_indexes
Create Date: 2015-09-01 13:45:44.886576
"""
# revision identifiers, used by Alembic.
revision = '280_switch_g7_framework_to_open'
down_revision = '270_add_audit_events_indexes'
from alembic import op
def upgrade():
op.execute("UPDATE frameworks SET status='open' WHERE name='G-Cloud 7'")
def downgrade():
pass
|
<commit_before><commit_msg>Switch on G-Cloud 7 - status change to open<commit_after>
|
"""Add audit_events type, object and created_at indexes
Revision ID: 280_switch_g7_framework_to_open
Revises: 270_add_audit_events_indexes
Create Date: 2015-09-01 13:45:44.886576
"""
# revision identifiers, used by Alembic.
revision = '280_switch_g7_framework_to_open'
down_revision = '270_add_audit_events_indexes'
from alembic import op
def upgrade():
op.execute("UPDATE frameworks SET status='open' WHERE name='G-Cloud 7'")
def downgrade():
pass
|
Switch on G-Cloud 7 - status change to open"""Switch the G-Cloud 7 framework status to open
Revision ID: 280_switch_g7_framework_to_open
Revises: 270_add_audit_events_indexes
Create Date: 2015-09-01 13:45:44.886576
"""
# revision identifiers, used by Alembic.
revision = '280_switch_g7_framework_to_open'
down_revision = '270_add_audit_events_indexes'
from alembic import op
def upgrade():
op.execute("UPDATE frameworks SET status='open' WHERE name='G-Cloud 7'")
def downgrade():
pass
|
<commit_before><commit_msg>Switch on G-Cloud 7 - status change to open<commit_after>"""Switch the G-Cloud 7 framework status to open
Revision ID: 280_switch_g7_framework_to_open
Revises: 270_add_audit_events_indexes
Create Date: 2015-09-01 13:45:44.886576
"""
# revision identifiers, used by Alembic.
revision = '280_switch_g7_framework_to_open'
down_revision = '270_add_audit_events_indexes'
from alembic import op
def upgrade():
op.execute("UPDATE frameworks SET status='open' WHERE name='G-Cloud 7'")
def downgrade():
pass
|
|
14bd96b2ce7e7b67d7fa9b583b44eb04630557b1
|
leetcode/RemoveElement.py
|
leetcode/RemoveElement.py
|
# Remove Element https://oj.leetcode.com/problems/remove-element/
# Given an array and a value, remove all instances of that value in place and return the new length.
# The order of elements can be changed. It doesn't matter what you leave beyond the new length.
#Arrays
# Xilin SUN
# Jan 8 2015
# Don't forget to return 0 if A == [].
class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
if len(A) == 0:
return 0
else:
k=0
for i in range(0, len(A)):
if A[i] != elem:
A[k] = A[i]
k += 1
return k
|
Add Remove Element from LeetCode
|
Add Remove Element from LeetCode
|
Python
|
mit
|
aenon/OnlineJudge,aenon/OnlineJudge
|
Add Remove Element from LeetCode
|
# Remove Element https://oj.leetcode.com/problems/remove-element/
# Given an array and a value, remove all instances of that value in place and return the new length.
# The order of elements can be changed. It doesn't matter what you leave beyond the new length.
#Arrays
# Xilin SUN
# Jan 8 2015
# Don't forget to return 0 if A == [].
class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
if len(A) == 0:
return 0
else:
k=0
for i in range(0, len(A)):
if A[i] != elem:
A[k] = A[i]
k += 1
return k
|
<commit_before><commit_msg>Add Remove Element from LeetCode<commit_after>
|
# Remove Element https://oj.leetcode.com/problems/remove-element/
# Given an array and a value, remove all instances of that value in place and return the new length.
# The order of elements can be changed. It doesn't matter what you leave beyond the new length.
#Arrays
# Xilin SUN
# Jan 8 2015
# Don't forget to return 0 if A == [].
class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
if len(A) == 0:
return 0
else:
k=0
for i in range(0, len(A)):
if A[i] != elem:
A[k] = A[i]
k += 1
return k
|
Add Remove Element from LeetCode# Remove Element https://oj.leetcode.com/problems/remove-element/
# Given an array and a value, remove all instances of that value in place and return the new length.
# The order of elements can be changed. It doesn't matter what you leave beyond the new length.
#Arrays
# Xilin SUN
# Jan 8 2015
# Don't forget to return 0 if A == [].
class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
if len(A) == 0:
return 0
else:
k=0
for i in range(0, len(A)):
if A[i] != elem:
A[k] = A[i]
k += 1
return k
|
<commit_before><commit_msg>Add Remove Element from LeetCode<commit_after># Remove Element https://oj.leetcode.com/problems/remove-element/
# Given an array and a value, remove all instances of that value in place and return the new length.
# The order of elements can be changed. It doesn't matter what you leave beyond the new length.
#Arrays
# Xilin SUN
# Jan 8 2015
# Don't forget to return 0 if A == [].
class Solution:
# @param A a list of integers
# @param elem an integer, value need to be removed
# @return an integer
def removeElement(self, A, elem):
if len(A) == 0:
return 0
else:
k=0
for i in range(0, len(A)):
if A[i] != elem:
A[k] = A[i]
k += 1
return k
|
|
214c7fdcad89135738c7caa60d7c57170a9c74db
|
workshopvenues/venues/migrations/0004_auto__add_field_address_country.py
|
workshopvenues/venues/migrations/0004_auto__add_field_address_country.py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Address.country'
db.add_column(u'venues_address', 'country',
self.gf('django.db.models.fields.CharField')(default='', max_length=30, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Address.country'
db.delete_column(u'venues_address', 'country')
models = {
u'venues.address': {
'Meta': {'object_name': 'Address'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.facility': {
'Meta': {'object_name': 'Facility'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.venue': {
'Meta': {'object_name': 'Venue'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Address']"}),
'facilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['venues.Facility']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['venues']
|
Add country field to Address
|
Add country field to Address
|
Python
|
bsd-3-clause
|
andreagrandi/workshopvenues
|
Add country field to Address
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Address.country'
db.add_column(u'venues_address', 'country',
self.gf('django.db.models.fields.CharField')(default='', max_length=30, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Address.country'
db.delete_column(u'venues_address', 'country')
models = {
u'venues.address': {
'Meta': {'object_name': 'Address'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.facility': {
'Meta': {'object_name': 'Facility'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.venue': {
'Meta': {'object_name': 'Venue'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Address']"}),
'facilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['venues.Facility']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['venues']
|
<commit_before><commit_msg>Add country field to Address<commit_after>
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Address.country'
db.add_column(u'venues_address', 'country',
self.gf('django.db.models.fields.CharField')(default='', max_length=30, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Address.country'
db.delete_column(u'venues_address', 'country')
models = {
u'venues.address': {
'Meta': {'object_name': 'Address'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.facility': {
'Meta': {'object_name': 'Facility'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.venue': {
'Meta': {'object_name': 'Venue'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Address']"}),
'facilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['venues.Facility']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['venues']
|
Add country field to Address# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Address.country'
db.add_column(u'venues_address', 'country',
self.gf('django.db.models.fields.CharField')(default='', max_length=30, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Address.country'
db.delete_column(u'venues_address', 'country')
models = {
u'venues.address': {
'Meta': {'object_name': 'Address'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.facility': {
'Meta': {'object_name': 'Facility'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.venue': {
'Meta': {'object_name': 'Venue'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Address']"}),
'facilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['venues.Facility']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['venues']
|
<commit_before><commit_msg>Add country field to Address<commit_after># -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Address.country'
db.add_column(u'venues_address', 'country',
self.gf('django.db.models.fields.CharField')(default='', max_length=30, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Address.country'
db.delete_column(u'venues_address', 'country')
models = {
u'venues.address': {
'Meta': {'object_name': 'Address'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.facility': {
'Meta': {'object_name': 'Facility'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'venues.venue': {
'Meta': {'object_name': 'Venue'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['venues.Address']"}),
'facilities': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['venues.Facility']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['venues']
|
|
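For orientation, a minimal sketch of the Address model this migration implies; the field list mirrors the frozen ORM definition above, but the real model code lives in the venues app and is assumed rather than shown in this commit.

from django.db import models
class Address(models.Model):
    # Field types and lengths mirror the frozen ORM definition in the migration.
    street = models.CharField(max_length=200)
    town = models.CharField(max_length=30)
    postcode = models.CharField(max_length=10)
    country = models.CharField(max_length=30, blank=True)  # added by this migration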
bcf2b57a3aca1953ce1bde7d2850f8475c561590
|
saleor/account/migrations/0040_permissions_to_groups.py
|
saleor/account/migrations/0040_permissions_to_groups.py
|
from collections import namedtuple
from django.db import migrations
def add_users_to_groups_based_on_users_permissions(apps, schema_editor):
"""Add every user to group with "user_permissions" if exists, else create new one.
For every user, iff group with exact scope of permissions exists, add user to it,
else create new group with this scope of permissions and add user to it.
"""
User = apps.get_model("account", "User")
Group = apps.get_model("auth", "Group")
groups = Group.objects.all().prefetch_related("permissions")
GroupData = namedtuple("GroupData", ["users", "group_name"])
mapping = create_permissions_mapping(User, GroupData)
for perms, group_data in mapping.items():
group = get_group_with_given_permissions(perms, groups)
users = group_data.users
if group:
group.user_set.add(*users)
continue
group = create_group_with_given_permissions(perms, group_data.group_name, Group)
group.user_set.add(*users)
def create_permissions_mapping(User, GroupData):
mapping = {}
users = User.objects.filter(user_permissions__isnull=False).prefetch_related(
"user_permissions"
)
for user in users:
permissions = user.user_permissions.all()
perm_pks = tuple(permissions.values_list("pk", flat=True))
if perm_pks not in mapping:
group_name = create_group_name(permissions)
mapping[perm_pks] = GroupData({user.pk}, group_name)
else:
mapping[perm_pks].users.add(user.pk)
user.user_permissions.clear()
return mapping
def create_group_name(permissions):
"""Create group name based on permissions."""
perm_names = permissions.values_list("name", flat=True)
formatted_names = [name.rstrip(".").lower() for name in perm_names]
group_name = ", ".join(formatted_names).capitalize()
return group_name
def get_group_with_given_permissions(permissions, groups):
for group in groups:
group_perm_pks = {perm.pk for perm in group.permissions.all()}
if group_perm_pks == set(permissions):
return group
def create_group_with_given_permissions(perm_pks, group_name, Group):
group = Group.objects.create(name=group_name)
group.permissions.add(*perm_pks)
return group
class Migration(migrations.Migration):
dependencies = [
("account", "0039_auto_20200221_0257"),
]
operations = [
migrations.RunPython(
add_users_to_groups_based_on_users_permissions, migrations.RunPython.noop
),
]
|
Add user permissions to groups migration
|
Add user permissions to groups migration
|
Python
|
bsd-3-clause
|
mociepka/saleor,mociepka/saleor,mociepka/saleor
|
Add user permissions to groups migration
|
from collections import namedtuple
from django.db import migrations
def add_users_to_groups_based_on_users_permissions(apps, schema_editor):
"""Add every user to group with "user_permissions" if exists, else create new one.
For every user, iff group with exact scope of permissions exists, add user to it,
else create new group with this scope of permissions and add user to it.
"""
User = apps.get_model("account", "User")
Group = apps.get_model("auth", "Group")
groups = Group.objects.all().prefetch_related("permissions")
GroupData = namedtuple("GroupData", ["users", "group_name"])
mapping = create_permissions_mapping(User, GroupData)
for perms, group_data in mapping.items():
group = get_group_with_given_permissions(perms, groups)
users = group_data.users
if group:
group.user_set.add(*users)
continue
group = create_group_with_given_permissions(perms, group_data.group_name, Group)
group.user_set.add(*users)
def create_permissions_mapping(User, GroupData):
mapping = {}
users = User.objects.filter(user_permissions__isnull=False).prefetch_related(
"user_permissions"
)
for user in users:
permissions = user.user_permissions.all()
perm_pks = tuple(permissions.values_list("pk", flat=True))
if perm_pks not in mapping:
group_name = create_group_name(permissions)
mapping[perm_pks] = GroupData({user.pk}, group_name)
else:
mapping[perm_pks].users.add(user.pk)
user.user_permissions.clear()
return mapping
def create_group_name(permissions):
"""Create group name based on permissions."""
perm_names = permissions.values_list("name", flat=True)
formatted_names = [name.rstrip(".").lower() for name in perm_names]
group_name = ", ".join(formatted_names).capitalize()
return group_name
def get_group_with_given_permissions(permissions, groups):
for group in groups:
group_perm_pks = {perm.pk for perm in group.permissions.all()}
if group_perm_pks == set(permissions):
return group
def create_group_with_given_permissions(perm_pks, group_name, Group):
group = Group.objects.create(name=group_name)
group.permissions.add(*perm_pks)
return group
class Migration(migrations.Migration):
dependencies = [
("account", "0039_auto_20200221_0257"),
]
operations = [
migrations.RunPython(
add_users_to_groups_based_on_users_permissions, migrations.RunPython.noop
),
]
|
<commit_before><commit_msg>Add user permissions to groups migration<commit_after>
|
from collections import namedtuple
from django.db import migrations
def add_users_to_groups_based_on_users_permissions(apps, schema_editor):
"""Add every user to group with "user_permissions" if exists, else create new one.
For every user, iff group with exact scope of permissions exists, add user to it,
else create new group with this scope of permissions and add user to it.
"""
User = apps.get_model("account", "User")
Group = apps.get_model("auth", "Group")
groups = Group.objects.all().prefetch_related("permissions")
GroupData = namedtuple("GroupData", ["users", "group_name"])
mapping = create_permissions_mapping(User, GroupData)
for perms, group_data in mapping.items():
group = get_group_with_given_permissions(perms, groups)
users = group_data.users
if group:
group.user_set.add(*users)
continue
group = create_group_with_given_permissions(perms, group_data.group_name, Group)
group.user_set.add(*users)
def create_permissions_mapping(User, GroupData):
mapping = {}
users = User.objects.filter(user_permissions__isnull=False).prefetch_related(
"user_permissions"
)
for user in users:
permissions = user.user_permissions.all()
perm_pks = tuple(permissions.values_list("pk", flat=True))
if perm_pks not in mapping:
group_name = create_group_name(permissions)
mapping[perm_pks] = GroupData({user.pk}, group_name)
else:
mapping[perm_pks].users.add(user.pk)
user.user_permissions.clear()
return mapping
def create_group_name(permissions):
"""Create group name based on permissions."""
perm_names = permissions.values_list("name", flat=True)
formatted_names = [name.rstrip(".").lower() for name in perm_names]
group_name = ", ".join(formatted_names).capitalize()
return group_name
def get_group_with_given_permissions(permissions, groups):
for group in groups:
group_perm_pks = {perm.pk for perm in group.permissions.all()}
if group_perm_pks == set(permissions):
return group
def create_group_with_given_permissions(perm_pks, group_name, Group):
group = Group.objects.create(name=group_name)
group.permissions.add(*perm_pks)
return group
class Migration(migrations.Migration):
dependencies = [
("account", "0039_auto_20200221_0257"),
]
operations = [
migrations.RunPython(
add_users_to_groups_based_on_users_permissions, migrations.RunPython.noop
),
]
|
Add user permissions to groups migrationfrom collections import namedtuple
from django.db import migrations
def add_users_to_groups_based_on_users_permissions(apps, schema_editor):
"""Add every user to group with "user_permissions" if exists, else create new one.
For every user, iff group with exact scope of permissions exists, add user to it,
else create new group with this scope of permissions and add user to it.
"""
User = apps.get_model("account", "User")
Group = apps.get_model("auth", "Group")
groups = Group.objects.all().prefetch_related("permissions")
GroupData = namedtuple("GroupData", ["users", "group_name"])
mapping = create_permissions_mapping(User, GroupData)
for perms, group_data in mapping.items():
group = get_group_with_given_permissions(perms, groups)
users = group_data.users
if group:
group.user_set.add(*users)
continue
group = create_group_with_given_permissions(perms, group_data.group_name, Group)
group.user_set.add(*users)
def create_permissions_mapping(User, GroupData):
mapping = {}
users = User.objects.filter(user_permissions__isnull=False).prefetch_related(
"user_permissions"
)
for user in users:
permissions = user.user_permissions.all()
perm_pks = tuple(permissions.values_list("pk", flat=True))
if perm_pks not in mapping:
group_name = create_group_name(permissions)
mapping[perm_pks] = GroupData({user.pk}, group_name)
else:
mapping[perm_pks].users.add(user.pk)
user.user_permissions.clear()
return mapping
def create_group_name(permissions):
"""Create group name based on permissions."""
perm_names = permissions.values_list("name", flat=True)
formatted_names = [name.rstrip(".").lower() for name in perm_names]
group_name = ", ".join(formatted_names).capitalize()
return group_name
def get_group_with_given_permissions(permissions, groups):
for group in groups:
group_perm_pks = {perm.pk for perm in group.permissions.all()}
if group_perm_pks == set(permissions):
return group
def create_group_with_given_permissions(perm_pks, group_name, Group):
group = Group.objects.create(name=group_name)
group.permissions.add(*perm_pks)
return group
class Migration(migrations.Migration):
dependencies = [
("account", "0039_auto_20200221_0257"),
]
operations = [
migrations.RunPython(
add_users_to_groups_based_on_users_permissions, migrations.RunPython.noop
),
]
|
<commit_before><commit_msg>Add user permissions to groups migration<commit_after>from collections import namedtuple
from django.db import migrations
def add_users_to_groups_based_on_users_permissions(apps, schema_editor):
"""Add every user to group with "user_permissions" if exists, else create new one.
For every user, iff group with exact scope of permissions exists, add user to it,
else create new group with this scope of permissions and add user to it.
"""
User = apps.get_model("account", "User")
Group = apps.get_model("auth", "Group")
groups = Group.objects.all().prefetch_related("permissions")
GroupData = namedtuple("GroupData", ["users", "group_name"])
mapping = create_permissions_mapping(User, GroupData)
for perms, group_data in mapping.items():
group = get_group_with_given_permissions(perms, groups)
users = group_data.users
if group:
group.user_set.add(*users)
continue
group = create_group_with_given_permissions(perms, group_data.group_name, Group)
group.user_set.add(*users)
def create_permissions_mapping(User, GroupData):
mapping = {}
users = User.objects.filter(user_permissions__isnull=False).prefetch_related(
"user_permissions"
)
for user in users:
permissions = user.user_permissions.all()
perm_pks = tuple(permissions.values_list("pk", flat=True))
if perm_pks not in mapping:
group_name = create_group_name(permissions)
mapping[perm_pks] = GroupData({user.pk}, group_name)
else:
mapping[perm_pks].users.add(user.pk)
user.user_permissions.clear()
return mapping
def create_group_name(permissions):
"""Create group name based on permissions."""
perm_names = permissions.values_list("name", flat=True)
formatted_names = [name.rstrip(".").lower() for name in perm_names]
group_name = ", ".join(formatted_names).capitalize()
return group_name
def get_group_with_given_permissions(permissions, groups):
for group in groups:
group_perm_pks = {perm.pk for perm in group.permissions.all()}
if group_perm_pks == set(permissions):
return group
def create_group_with_given_permissions(perm_pks, group_name, Group):
group = Group.objects.create(name=group_name)
group.permissions.add(*perm_pks)
return group
class Migration(migrations.Migration):
dependencies = [
("account", "0039_auto_20200221_0257"),
]
operations = [
migrations.RunPython(
add_users_to_groups_based_on_users_permissions, migrations.RunPython.noop
),
]
|
|
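The heart of this data migration is grouping users by their exact tuple of permission primary keys; a tiny standalone sketch of that grouping step (plain dicts, hypothetical data, no Django) looks roughly like this:

# Hypothetical user -> permission-pk data; the real migration reads this from the ORM.
user_perms = {"alice": (1, 2), "bob": (1, 2), "carol": (3,)}
mapping = {}
for user, perm_pks in user_perms.items():
    # Users sharing the same permission-pk tuple end up in the same group.
    mapping.setdefault(perm_pks, set()).add(user)
print(mapping)  # {(1, 2): {'alice', 'bob'}, (3,): {'carol'}}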
785a2f2f12dc72e624d76dd918533db1256c5885
|
mltils/sklearn/bagging.py
|
mltils/sklearn/bagging.py
|
import random
import numpy as np
from sklearn.base import BaseEstimator, clone
from tqdm import tqdm
class BaggedModel(BaseEstimator):
def __init__(self, estimator, n_models, random_state, verbose=True):
self.estimator = estimator
self.n_models = n_models
self.random_state = random_state
self.verbose = verbose
self.models = []
def fit(self, X, y, *args, **kwargs):
random.seed(self.random_state)
np.random.seed(self.random_state)
if self.verbose:
iterator = tqdm(range(self.n_models))
else:
iterator = range(self.n_models)
for _ in iterator:
model = clone(self.estimator)
self.models.append(model.fit(X, y, *args, **kwargs))
return self
def predict_proba(self, X, *args, **kwargs):
predictions = self.models[0].predict_proba(X, *args, **kwargs)
for i in range(1, self.n_models):
predictions += self.models[i].predict_proba(X, *args, **kwargs)
predictions = predictions / self.n_models
return predictions
|
Add class implementing a bagged estimator
|
Add class implementing a bagged estimator
|
Python
|
mit
|
rladeira/mltils
|
Add class implementing a bagged estimator
|
import random
import numpy as np
from sklearn.base import BaseEstimator, clone
from tqdm import tqdm
class BaggedModel(BaseEstimator):
def __init__(self, estimator, n_models, random_state, verbose=True):
self.estimator = estimator
self.n_models = n_models
self.random_state = random_state
self.verbose = verbose
self.models = []
def fit(self, X, y, *args, **kwargs):
random.seed(self.random_state)
np.random.seed(self.random_state)
if self.verbose:
iterator = tqdm(range(self.n_models))
else:
iterator = range(self.n_models)
for _ in iterator:
model = clone(self.estimator)
self.models.append(model.fit(X, y, *args, **kwargs))
return self
def predict_proba(self, X, *args, **kwargs):
predictions = self.models[0].predict_proba(X, *args, **kwargs)
for i in range(1, self.n_models):
predictions += self.models[i].predict_proba(X, *args, **kwargs)
predictions = predictions / self.n_models
return predictions
|
<commit_before><commit_msg>Add class implementing a bagged estimator<commit_after>
|
import random
import numpy as np
from sklearn.base import BaseEstimator, clone
from tqdm import tqdm
class BaggedModel(BaseEstimator):
def __init__(self, estimator, n_models, random_state, verbose=True):
self.estimator = estimator
self.n_models = n_models
self.random_state = random_state
self.verbose = verbose
self.models = []
def fit(self, X, y, *args, **kwargs):
random.seed(self.random_state)
np.random.seed(self.random_state)
if self.verbose:
iterator = tqdm(range(self.n_models))
else:
iterator = range(self.n_models)
for _ in iterator:
model = clone(self.estimator)
self.models.append(model.fit(X, y, *args, **kwargs))
return self
def predict_proba(self, X, *args, **kwargs):
predictions = self.models[0].predict_proba(X, *args, **kwargs)
for i in range(1, self.n_models):
predictions += self.models[i].predict_proba(X, *args, **kwargs)
predictions = predictions / self.n_models
return predictions
|
Add class implementing a bagged estimator
import random
import numpy as np
from sklearn.base import BaseEstimator, clone
from tqdm import tqdm
class BaggedModel(BaseEstimator):
def __init__(self, estimator, n_models, random_state, verbose=True):
self.estimator = estimator
self.n_models = n_models
self.random_state = random_state
self.verbose = verbose
self.models = []
def fit(self, X, y, *args, **kwargs):
random.seed(self.random_state)
np.random.seed(self.random_state)
if self.verbose:
iterator = tqdm(range(self.n_models))
else:
iterator = range(self.n_models)
for _ in iterator:
model = clone(self.estimator)
self.models.append(model.fit(X, y, *args, **kwargs))
return self
def predict_proba(self, X, *args, **kwargs):
predictions = self.models[0].predict_proba(X, *args, **kwargs)
for i in range(1, self.n_models):
predictions += self.models[i].predict_proba(X, *args, **kwargs)
predictions = predictions / self.n_models
return predictions
|
<commit_before><commit_msg>Add class implementing a bagged estimator<commit_after>
import random
import numpy as np
from sklearn.base import BaseEstimator, clone
from tqdm import tqdm
class BaggedModel(BaseEstimator):
def __init__(self, estimator, n_models, random_state, verbose=True):
self.estimator = estimator
self.n_models = n_models
self.random_state = random_state
self.verbose = verbose
self.models = []
def fit(self, X, y, *args, **kwargs):
random.seed(self.random_state)
np.random.seed(self.random_state)
if self.verbose:
iterator = tqdm(range(self.n_models))
else:
iterator = range(self.n_models)
for _ in iterator:
model = clone(self.estimator)
self.models.append(model.fit(X, y, *args, **kwargs))
return self
def predict_proba(self, X, *args, **kwargs):
predictions = self.models[0].predict_proba(X, *args, **kwargs)
for i in range(1, self.n_models):
predictions += self.models[i].predict_proba(X, *args, **kwargs)
predictions = predictions / self.n_models
return predictions
|
|
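A hedged usage sketch for BaggedModel, assuming scikit-learn is installed and the class above is importable; the RandomForestClassifier base estimator and the synthetic dataset are only examples. Note that fit reuses the same training data for every clone, so the averaged fits only differ when the base estimator itself is randomized.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
X, y = make_classification(n_samples=200, random_state=0)
bagged = BaggedModel(
    estimator=RandomForestClassifier(n_estimators=50),
    n_models=5,
    random_state=42,
)
bagged.fit(X, y)
proba = bagged.predict_proba(X)  # class probabilities averaged over the 5 fits
print(proba.shape)  # (200, 2)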
ca791d66b85baae91d6decd5f5a201a2b7512efb
|
wafer/schedule/migrations/0002_auto_20140909_1403.py
|
wafer/schedule/migrations/0002_auto_20140909_1403.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schedule', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='slot',
options={'ordering': ['day', 'end_time', 'start_time']},
),
]
|
Update migration to latest schedule work
|
Update migration to latest schedule work
|
Python
|
isc
|
CarlFK/wafer,CTPUG/wafer,CarlFK/wafer,CTPUG/wafer,CarlFK/wafer,CTPUG/wafer,CarlFK/wafer,CTPUG/wafer
|
Update migration to latest schedule work
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schedule', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='slot',
options={'ordering': ['day', 'end_time', 'start_time']},
),
]
|
<commit_before><commit_msg>Update migration to latest schedule work<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schedule', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='slot',
options={'ordering': ['day', 'end_time', 'start_time']},
),
]
|
Update migration to latest schedule work# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schedule', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='slot',
options={'ordering': ['day', 'end_time', 'start_time']},
),
]
|
<commit_before><commit_msg>Update migration to latest schedule work<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('schedule', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='slot',
options={'ordering': ['day', 'end_time', 'start_time']},
),
]
|
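This migration only records a change to model options; on the model side the equivalent declaration would look roughly like the sketch below, with the Slot fields simplified and assumed.

from django.db import models
class Slot(models.Model):
    day = models.DateField()          # simplified; the real field may be a relation
    start_time = models.TimeField()
    end_time = models.TimeField()
    class Meta:
        ordering = ['day', 'end_time', 'start_time']  # matches the migration above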
|
09d6b1e3766ff8e35d55c4aa3e7574c292d0be34
|
tests/utils/test_get_storage_class.py
|
tests/utils/test_get_storage_class.py
|
import pytest
from storages import FileSystemStorage
from storages.utils import get_storage_class
def test_get_filesystem_storage():
"""
get_storage_class returns the class for a storage backend name/path.
"""
assert get_storage_class("storages.FileSystemStorage") is FileSystemStorage
def test_get_invalid_storage_module():
"""
    get_storage_class raises an error if the requested import doesn't exist.
"""
with pytest.raises(ValueError):
get_storage_class("NonExistingStorage")
def test_get_nonexisting_storage_class():
"""
    get_storage_class raises an error if the requested class doesn't exist.
"""
with pytest.raises(AttributeError):
get_storage_class("storages.NonExistingStorage")
def test_get_nonexisting_storage_module():
"""
    get_storage_class raises an error if the requested module doesn't exist.
"""
# Error message may or may not be the fully qualified path.
with pytest.raises(ImportError):
get_storage_class("storages.non_existing.NonExistingStoage")
|
Test the get_storage_class utility function
|
Test the get_storage_class utility function
|
Python
|
bsd-2-clause
|
dstufft/storages
|
Test the get_storage_class utility function
|
import pytest
from storages import FileSystemStorage
from storages.utils import get_storage_class
def test_get_filesystem_storage():
"""
get_storage_class returns the class for a storage backend name/path.
"""
assert get_storage_class("storages.FileSystemStorage") is FileSystemStorage
def test_get_invalid_storage_module():
"""
    get_storage_class raises an error if the requested import doesn't exist.
"""
with pytest.raises(ValueError):
get_storage_class("NonExistingStorage")
def test_get_nonexisting_storage_class():
"""
    get_storage_class raises an error if the requested class doesn't exist.
"""
with pytest.raises(AttributeError):
get_storage_class("storages.NonExistingStorage")
def test_get_nonexisting_storage_module():
"""
    get_storage_class raises an error if the requested module doesn't exist.
"""
# Error message may or may not be the fully qualified path.
with pytest.raises(ImportError):
get_storage_class("storages.non_existing.NonExistingStoage")
|
<commit_before><commit_msg>Test the get_storage_class utility function<commit_after>
|
import pytest
from storages import FileSystemStorage
from storages.utils import get_storage_class
def test_get_filesystem_storage():
"""
get_storage_class returns the class for a storage backend name/path.
"""
assert get_storage_class("storages.FileSystemStorage") is FileSystemStorage
def test_get_invalid_storage_module():
"""
    get_storage_class raises an error if the requested import doesn't exist.
"""
with pytest.raises(ValueError):
get_storage_class("NonExistingStorage")
def test_get_nonexisting_storage_class():
"""
    get_storage_class raises an error if the requested class doesn't exist.
"""
with pytest.raises(AttributeError):
get_storage_class("storages.NonExistingStorage")
def test_get_nonexisting_storage_module():
"""
    get_storage_class raises an error if the requested module doesn't exist.
"""
# Error message may or may not be the fully qualified path.
with pytest.raises(ImportError):
get_storage_class("storages.non_existing.NonExistingStoage")
|
Test the get_storage_class utility functionimport pytest
from storages import FileSystemStorage
from storages.utils import get_storage_class
def test_get_filesystem_storage():
"""
get_storage_class returns the class for a storage backend name/path.
"""
assert get_storage_class("storages.FileSystemStorage") is FileSystemStorage
def test_get_invalid_storage_module():
"""
    get_storage_class raises an error if the requested import doesn't exist.
"""
with pytest.raises(ValueError):
get_storage_class("NonExistingStorage")
def test_get_nonexisting_storage_class():
"""
    get_storage_class raises an error if the requested class doesn't exist.
"""
with pytest.raises(AttributeError):
get_storage_class("storages.NonExistingStorage")
def test_get_nonexisting_storage_module():
"""
    get_storage_class raises an error if the requested module doesn't exist.
"""
# Error message may or may not be the fully qualified path.
with pytest.raises(ImportError):
get_storage_class("storages.non_existing.NonExistingStoage")
|
<commit_before><commit_msg>Test the get_storage_class utility function<commit_after>import pytest
from storages import FileSystemStorage
from storages.utils import get_storage_class
def test_get_filesystem_storage():
"""
get_storage_class returns the class for a storage backend name/path.
"""
assert get_storage_class("storages.FileSystemStorage") is FileSystemStorage
def test_get_invalid_storage_module():
"""
    get_storage_class raises an error if the requested import doesn't exist.
"""
with pytest.raises(ValueError):
get_storage_class("NonExistingStorage")
def test_get_nonexisting_storage_class():
"""
    get_storage_class raises an error if the requested class doesn't exist.
"""
with pytest.raises(AttributeError):
get_storage_class("storages.NonExistingStorage")
def test_get_nonexisting_storage_module():
"""
    get_storage_class raises an error if the requested module doesn't exist.
"""
# Error message may or may not be the fully qualified path.
with pytest.raises(ImportError):
get_storage_class("storages.non_existing.NonExistingStoage")
|
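The tests above pin down the behaviour of get_storage_class without showing it; a plausible implementation consistent with those tests (not necessarily the library's actual code) could look like this:

import importlib
def get_storage_class(import_path):
    # "storages.FileSystemStorage" -> module "storages", attribute "FileSystemStorage"
    try:
        module_path, class_name = import_path.rsplit(".", 1)
    except ValueError:
        # No dot at all: not a valid dotted path (ValueError, as the tests expect)
        raise ValueError("%s isn't a valid storage path." % import_path)
    module = importlib.import_module(module_path)  # ImportError for missing modules
    return getattr(module, class_name)             # AttributeError for missing classes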