Dataset columns (name, type, and observed value lengths):

| column | type | length / values |
|---|---|---|
| commit | string | 40 |
| old_file | string | 4–118 |
| new_file | string | 4–118 |
| old_contents | string | 0–2.94k |
| new_contents | string | 1–4.43k |
| subject | string | 15–444 |
| message | string | 16–3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | 5–43.2k |
| prompt | string | 17–4.58k |
| response | string | 1–4.43k |
| prompt_tagged | string | 58–4.62k |
| response_tagged | string | 1–4.43k |
| text | string | 132–7.29k |
| text_tagged | string | 173–7.33k |

The data rows follow below in the column order listed above, with cells separated by `|` lines.
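For orientation, a minimal sketch of how rows with this schema could be loaded and inspected, assuming the data is available as a local Parquet export (the file name below is hypothetical):

```python
# Minimal sketch: inspect a few rows that follow the schema above.
# Assumes a local Parquet export named "commits.parquet" (hypothetical name;
# adjust the path to wherever the dataset actually lives).
import pandas as pd

df = pd.read_parquet("commits.parquet")

# Each row pairs a commit with the affected file's contents before and after
# the change, plus prompt/response views of the same material.
for _, row in df.head(3).iterrows():
    print(row["commit"], row["new_file"])
    print(row["subject"])
    print(row["new_contents"][:200])  # preview of the post-commit file contents
```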
d4274b32f283a056bec7048f1ab8b78bf1518bb6
|
var/spack/repos/builtin/packages/bwa/package.py
|
var/spack/repos/builtin/packages/bwa/package.py
|
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bwa(Package):
"""Burrow-Wheeler Aligner for pairwise alignment between DNA sequences."""
homepage = "http://github.com/lh3/bwa"
url = "https://github.com/lh3/bwa/releases/download/v0.7.15/bwa-0.7.15.tar.bz2"
version('0.7.15', 'fcf470a46a1dbe2f96a1c5b87c530554')
depends_on('zlib')
def install(self, spec, prefix):
filter_file(r'^INCLUDES=',
"INCLUDES=-I%s" % spec['zlib'].prefix.include, 'Makefile')
filter_file(r'^LIBS=', "LIBS=-L%s " % spec['zlib'].prefix.lib,
'Makefile')
make()
mkdirp(prefix.bin)
install('bwa', join_path(prefix.bin, 'bwa'))
set_executable(join_path(prefix.bin, 'bwa'))
mkdirp(prefix.doc)
install('README.md', prefix.doc)
install('NEWS.md', prefix.doc)
mkdirp(prefix.man1)
install('bwa.1', prefix.man1)
|
Make flake8 happy (long lines)
|
Make flake8 happy (long lines)
|
Python
|
lgpl-2.1
|
matthiasdiener/spack,matthiasdiener/spack,TheTimmy/spack,skosukhin/spack,EmreAtes/spack,matthiasdiener/spack,mfherbst/spack,tmerrick1/spack,TheTimmy/spack,EmreAtes/spack,LLNL/spack,tmerrick1/spack,lgarren/spack,lgarren/spack,skosukhin/spack,EmreAtes/spack,iulian787/spack,lgarren/spack,krafczyk/spack,TheTimmy/spack,iulian787/spack,LLNL/spack,tmerrick1/spack,krafczyk/spack,TheTimmy/spack,matthiasdiener/spack,mfherbst/spack,mfherbst/spack,LLNL/spack,matthiasdiener/spack,iulian787/spack,LLNL/spack,mfherbst/spack,EmreAtes/spack,lgarren/spack,iulian787/spack,lgarren/spack,iulian787/spack,skosukhin/spack,skosukhin/spack,TheTimmy/spack,EmreAtes/spack,krafczyk/spack,tmerrick1/spack,skosukhin/spack,krafczyk/spack,krafczyk/spack,mfherbst/spack,tmerrick1/spack,LLNL/spack
|
Make flake8 happy (long lines)
|
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bwa(Package):
"""Burrow-Wheeler Aligner for pairwise alignment between DNA sequences."""
homepage = "http://github.com/lh3/bwa"
url = "https://github.com/lh3/bwa/releases/download/v0.7.15/bwa-0.7.15.tar.bz2"
version('0.7.15', 'fcf470a46a1dbe2f96a1c5b87c530554')
depends_on('zlib')
def install(self, spec, prefix):
filter_file(r'^INCLUDES=',
"INCLUDES=-I%s" % spec['zlib'].prefix.include, 'Makefile')
filter_file(r'^LIBS=', "LIBS=-L%s " % spec['zlib'].prefix.lib,
'Makefile')
make()
mkdirp(prefix.bin)
install('bwa', join_path(prefix.bin, 'bwa'))
set_executable(join_path(prefix.bin, 'bwa'))
mkdirp(prefix.doc)
install('README.md', prefix.doc)
install('NEWS.md', prefix.doc)
mkdirp(prefix.man1)
install('bwa.1', prefix.man1)
|
<commit_before><commit_msg>Make flake8 happy (long lines)<commit_after>
|
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bwa(Package):
"""Burrow-Wheeler Aligner for pairwise alignment between DNA sequences."""
homepage = "http://github.com/lh3/bwa"
url = "https://github.com/lh3/bwa/releases/download/v0.7.15/bwa-0.7.15.tar.bz2"
version('0.7.15', 'fcf470a46a1dbe2f96a1c5b87c530554')
depends_on('zlib')
def install(self, spec, prefix):
filter_file(r'^INCLUDES=',
"INCLUDES=-I%s" % spec['zlib'].prefix.include, 'Makefile')
filter_file(r'^LIBS=', "LIBS=-L%s " % spec['zlib'].prefix.lib,
'Makefile')
make()
mkdirp(prefix.bin)
install('bwa', join_path(prefix.bin, 'bwa'))
set_executable(join_path(prefix.bin, 'bwa'))
mkdirp(prefix.doc)
install('README.md', prefix.doc)
install('NEWS.md', prefix.doc)
mkdirp(prefix.man1)
install('bwa.1', prefix.man1)
|
Make flake8 happy (long lines)##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bwa(Package):
"""Burrow-Wheeler Aligner for pairwise alignment between DNA sequences."""
homepage = "http://github.com/lh3/bwa"
url = "https://github.com/lh3/bwa/releases/download/v0.7.15/bwa-0.7.15.tar.bz2"
version('0.7.15', 'fcf470a46a1dbe2f96a1c5b87c530554')
depends_on('zlib')
def install(self, spec, prefix):
filter_file(r'^INCLUDES=',
"INCLUDES=-I%s" % spec['zlib'].prefix.include, 'Makefile')
filter_file(r'^LIBS=', "LIBS=-L%s " % spec['zlib'].prefix.lib,
'Makefile')
make()
mkdirp(prefix.bin)
install('bwa', join_path(prefix.bin, 'bwa'))
set_executable(join_path(prefix.bin, 'bwa'))
mkdirp(prefix.doc)
install('README.md', prefix.doc)
install('NEWS.md', prefix.doc)
mkdirp(prefix.man1)
install('bwa.1', prefix.man1)
|
<commit_before><commit_msg>Make flake8 happy (long lines)<commit_after>##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bwa(Package):
"""Burrow-Wheeler Aligner for pairwise alignment between DNA sequences."""
homepage = "http://github.com/lh3/bwa"
url = "https://github.com/lh3/bwa/releases/download/v0.7.15/bwa-0.7.15.tar.bz2"
version('0.7.15', 'fcf470a46a1dbe2f96a1c5b87c530554')
depends_on('zlib')
def install(self, spec, prefix):
filter_file(r'^INCLUDES=',
"INCLUDES=-I%s" % spec['zlib'].prefix.include, 'Makefile')
filter_file(r'^LIBS=', "LIBS=-L%s " % spec['zlib'].prefix.lib,
'Makefile')
make()
mkdirp(prefix.bin)
install('bwa', join_path(prefix.bin, 'bwa'))
set_executable(join_path(prefix.bin, 'bwa'))
mkdirp(prefix.doc)
install('README.md', prefix.doc)
install('NEWS.md', prefix.doc)
mkdirp(prefix.man1)
install('bwa.1', prefix.man1)
|
|
a0cff25bdc493925ae750d6daa0bad0150677f2e
|
rdmo/projects/migrations/0032_data_migration.py
|
rdmo/projects/migrations/0032_data_migration.py
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
def run_data_migration(apps, schema_editor):
Project = apps.get_model('projects', 'Project')
Task = apps.get_model('tasks', 'Task')
View = apps.get_model('views', 'View')
for project in Project.objects.all():
# add all tasks to project
tasks = Task.objects.filter(sites=settings.SITE_ID)
for task in tasks:
project.tasks.add(task)
# add all views to project
views = View.objects.filter(sites=settings.SITE_ID).filter(models.Q(catalogs=None) | models.Q(catalogs=project.catalog))
for view in views:
project.views.add(view)
class Migration(migrations.Migration):
dependencies = [
('projects', '0031_project_tasks'),
('tasks', '0028_data_migration'),
('views', '0023_data_migration')
]
operations = [
migrations.RunPython(run_data_migration),
]
|
Add migration to add tasks and views to existing projects
|
Add migration to add tasks and views to existing projects
|
Python
|
apache-2.0
|
DMPwerkzeug/DMPwerkzeug,rdmorganiser/rdmo,rdmorganiser/rdmo,DMPwerkzeug/DMPwerkzeug,DMPwerkzeug/DMPwerkzeug,rdmorganiser/rdmo
|
Add migration to add tasks and views to existing projects
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
def run_data_migration(apps, schema_editor):
Project = apps.get_model('projects', 'Project')
Task = apps.get_model('tasks', 'Task')
View = apps.get_model('views', 'View')
for project in Project.objects.all():
# add all tasks to project
tasks = Task.objects.filter(sites=settings.SITE_ID)
for task in tasks:
project.tasks.add(task)
# add all views to project
views = View.objects.filter(sites=settings.SITE_ID).filter(models.Q(catalogs=None) | models.Q(catalogs=project.catalog))
for view in views:
project.views.add(view)
class Migration(migrations.Migration):
dependencies = [
('projects', '0031_project_tasks'),
('tasks', '0028_data_migration'),
('views', '0023_data_migration')
]
operations = [
migrations.RunPython(run_data_migration),
]
|
<commit_before><commit_msg>Add migration to add tasks and views to existing projects<commit_after>
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
def run_data_migration(apps, schema_editor):
Project = apps.get_model('projects', 'Project')
Task = apps.get_model('tasks', 'Task')
View = apps.get_model('views', 'View')
for project in Project.objects.all():
# add all tasks to project
tasks = Task.objects.filter(sites=settings.SITE_ID)
for task in tasks:
project.tasks.add(task)
# add all views to project
views = View.objects.filter(sites=settings.SITE_ID).filter(models.Q(catalogs=None) | models.Q(catalogs=project.catalog))
for view in views:
project.views.add(view)
class Migration(migrations.Migration):
dependencies = [
('projects', '0031_project_tasks'),
('tasks', '0028_data_migration'),
('views', '0023_data_migration')
]
operations = [
migrations.RunPython(run_data_migration),
]
|
Add migration to add tasks and views to existing projectsfrom __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
def run_data_migration(apps, schema_editor):
Project = apps.get_model('projects', 'Project')
Task = apps.get_model('tasks', 'Task')
View = apps.get_model('views', 'View')
for project in Project.objects.all():
# add all tasks to project
tasks = Task.objects.filter(sites=settings.SITE_ID)
for task in tasks:
project.tasks.add(task)
# add all views to project
views = View.objects.filter(sites=settings.SITE_ID).filter(models.Q(catalogs=None) | models.Q(catalogs=project.catalog))
for view in views:
project.views.add(view)
class Migration(migrations.Migration):
dependencies = [
('projects', '0031_project_tasks'),
('tasks', '0028_data_migration'),
('views', '0023_data_migration')
]
operations = [
migrations.RunPython(run_data_migration),
]
|
<commit_before><commit_msg>Add migration to add tasks and views to existing projects<commit_after>from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
def run_data_migration(apps, schema_editor):
Project = apps.get_model('projects', 'Project')
Task = apps.get_model('tasks', 'Task')
View = apps.get_model('views', 'View')
for project in Project.objects.all():
# add all tasks to project
tasks = Task.objects.filter(sites=settings.SITE_ID)
for task in tasks:
project.tasks.add(task)
# add all views to project
views = View.objects.filter(sites=settings.SITE_ID).filter(models.Q(catalogs=None) | models.Q(catalogs=project.catalog))
for view in views:
project.views.add(view)
class Migration(migrations.Migration):
dependencies = [
('projects', '0031_project_tasks'),
('tasks', '0028_data_migration'),
('views', '0023_data_migration')
]
operations = [
migrations.RunPython(run_data_migration),
]
|
|
9867362ddc198ecdd095b6190936a683caed4aad
|
oneTimeEditedString.py
|
oneTimeEditedString.py
|
def isEditedOnce(stringOne,stringTwo):
stringOneLength = len(stringOne)
stringTwoLength = len(stringTwo)
totalChanges = 0
if abs(stringOneLength - stringTwoLength) > 1:
return False
else:
if stringOneLength == stringTwoLength:
for index in range(0,stringOneLength):
if stringOne[index] != stringTwo[index]:
totalChanges = totalChanges + 1
else:
hashTable = [0] * 27
for character in stringOne.lower():
hashTable[ord(character)%96] = hashTable[ord(character)%96] + 1
for character in stringTwo.lower():
if hashTable[ord(character)%96] > 0:
hashTable[ord(character)%96] = hashTable[ord(character)%96] - 1
else:
hashTable[ord(character)%96] = hashTable[ord(character)%96] + 1
for index in range(0,27):
if hashTable[index] > 0:
totalChanges = totalChanges +1
if totalChanges > 1:
return False
else:
return True
print isEditedOnce('pale','ple') # True ,O(n)
print isEditedOnce('pales','pale') # True ,O(n)
print isEditedOnce('pale','bale') # True ,O(n)
print isEditedOnce('pale','bake') # False ,O(n)
|
Verify String is edited only one time
|
Verify String is edited only one time
|
Python
|
mit
|
arunkumarpalaniappan/algorithm_tryouts
|
Verify String is edited only one time
|
def isEditedOnce(stringOne,stringTwo):
stringOneLength = len(stringOne)
stringTwoLength = len(stringTwo)
totalChanges = 0
if abs(stringOneLength - stringTwoLength) > 1:
return False
else:
if stringOneLength == stringTwoLength:
for index in range(0,stringOneLength):
if stringOne[index] != stringTwo[index]:
totalChanges = totalChanges + 1
else:
hashTable = [0] * 27
for character in stringOne.lower():
hashTable[ord(character)%96] = hashTable[ord(character)%96] + 1
for character in stringTwo.lower():
if hashTable[ord(character)%96] > 0:
hashTable[ord(character)%96] = hashTable[ord(character)%96] - 1
else:
hashTable[ord(character)%96] = hashTable[ord(character)%96] + 1
for index in range(0,27):
if hashTable[index] > 0:
totalChanges = totalChanges +1
if totalChanges > 1:
return False
else:
return True
print isEditedOnce('pale','ple') # True ,O(n)
print isEditedOnce('pales','pale') # True ,O(n)
print isEditedOnce('pale','bale') # True ,O(n)
print isEditedOnce('pale','bake') # False ,O(n)
|
<commit_before><commit_msg>Verify String is edited only one time<commit_after>
|
def isEditedOnce(stringOne,stringTwo):
stringOneLength = len(stringOne)
stringTwoLength = len(stringTwo)
totalChanges = 0
if abs(stringOneLength - stringTwoLength) > 1:
return False
else:
if stringOneLength == stringTwoLength:
for index in range(0,stringOneLength):
if stringOne[index] != stringTwo[index]:
totalChanges = totalChanges + 1
else:
hashTable = [0] * 27
for character in stringOne.lower():
hashTable[ord(character)%96] = hashTable[ord(character)%96] + 1
for character in stringTwo.lower():
if hashTable[ord(character)%96] > 0:
hashTable[ord(character)%96] = hashTable[ord(character)%96] - 1
else:
hashTable[ord(character)%96] = hashTable[ord(character)%96] + 1
for index in range(0,27):
if hashTable[index] > 0:
totalChanges = totalChanges +1
if totalChanges > 1:
return False
else:
return True
print isEditedOnce('pale','ple') # True ,O(n)
print isEditedOnce('pales','pale') # True ,O(n)
print isEditedOnce('pale','bale') # True ,O(n)
print isEditedOnce('pale','bake') # False ,O(n)
|
Verify String is edited only one timedef isEditedOnce(stringOne,stringTwo):
stringOneLength = len(stringOne)
stringTwoLength = len(stringTwo)
totalChanges = 0
if abs(stringOneLength - stringTwoLength) > 1:
return False
else:
if stringOneLength == stringTwoLength:
for index in range(0,stringOneLength):
if stringOne[index] != stringTwo[index]:
totalChanges = totalChanges + 1
else:
hashTable = [0] * 27
for character in stringOne.lower():
hashTable[ord(character)%96] = hashTable[ord(character)%96] + 1
for character in stringTwo.lower():
if hashTable[ord(character)%96] > 0:
hashTable[ord(character)%96] = hashTable[ord(character)%96] - 1
else:
hashTable[ord(character)%96] = hashTable[ord(character)%96] + 1
for index in range(0,27):
if hashTable[index] > 0:
totalChanges = totalChanges +1
if totalChanges > 1:
return False
else:
return True
print isEditedOnce('pale','ple') # True ,O(n)
print isEditedOnce('pales','pale') # True ,O(n)
print isEditedOnce('pale','bale') # True ,O(n)
print isEditedOnce('pale','bake') # False ,O(n)
|
<commit_before><commit_msg>Verify String is edited only one time<commit_after>def isEditedOnce(stringOne,stringTwo):
stringOneLength = len(stringOne)
stringTwoLength = len(stringTwo)
totalChanges = 0
if abs(stringOneLength - stringTwoLength) > 1:
return False
else:
if stringOneLength == stringTwoLength:
for index in range(0,stringOneLength):
if stringOne[index] != stringTwo[index]:
totalChanges = totalChanges + 1
else:
hashTable = [0] * 27
for character in stringOne.lower():
hashTable[ord(character)%96] = hashTable[ord(character)%96] + 1
for character in stringTwo.lower():
if hashTable[ord(character)%96] > 0:
hashTable[ord(character)%96] = hashTable[ord(character)%96] - 1
else:
hashTable[ord(character)%96] = hashTable[ord(character)%96] + 1
for index in range(0,27):
if hashTable[index] > 0:
totalChanges = totalChanges +1
if totalChanges > 1:
return False
else:
return True
print isEditedOnce('pale','ple') # True ,O(n)
print isEditedOnce('pales','pale') # True ,O(n)
print isEditedOnce('pale','bale') # True ,O(n)
print isEditedOnce('pale','bake') # False ,O(n)
|
|
5d918622ca14c1d9eed88346c200eca60bccaa5f
|
yatsm/classification/roi.py
|
yatsm/classification/roi.py
|
""" Utilities for extracting training data from region of interests (ROI)
"""
import numpy as np
from rasterio.features import rasterize
from shapely.geometry import shape as geom_shape
def extract_roi(src, features, feature_prop=None, all_touched=False, fill=0):
""" Yield pixel data from ``src`` for ROIs in ``features``
Args:
src (rasterio.RasterReader): The ``rasterio`` dataset used to extract
training data values from
features (list[dict]): A list of features from a polygon vector file
given in the format used by fiona
feature_prop (str): The name of the attribute from ``features``
containing the ROI labels
all_touched (bool): Rasterization option that decides if all pixels
touching the ROI should be included, or just pixels from within
the ROI
fill (int, float): A fill value for the ROI rasterization. This fill
value should not be the same as any of the labels in the set of
ROIs
Returns:
tuple (np.ndarray, np.ndarray, np.ndarray, np.ndarray): A tuple
containing an array of ROI data from ``src`` (``band x n``), the
ROI data label (``n``), and the X and Y coordinates of each data
point (``n`` and ``n`` sized)
"""
if not feature_prop:
feature_prop = list(features[0]['properties'].keys())[0]
for feat, label in features:
geom = geom_shape(feat)
bounds = tuple(geom.bounds)
window = src.window(*bounds, boundless=True)
data = src.read(window=window, boundless=True)
shape = data.shape
transform = src.window_transform(window)
roi = rasterize(
[feat],
out_shape=shape[1:],
transform=transform,
fill=fill,
all_touched=all_touched
)
mask = np.logical_or(
(data == src.nodata).any(axis=0),
roi == fill
)
masked = np.ma.MaskedArray(
data,
mask=np.ones_like(data) * mask
)
ys, xs = np.where(~mask)
coord_xs, coord_ys = transform * (xs, ys)
masked = masked.compressed()
npix = masked.size / shape[0]
masked = masked.reshape((shape[0], npix))
label = label * np.ones(coord_ys.size, dtype=np.uint8)
yield (masked, label, coord_xs, coord_ys, )
|
Add ROI extraction code for use w/ xarray
|
Add ROI extraction code for use w/ xarray
|
Python
|
mit
|
valpasq/yatsm,c11/yatsm,valpasq/yatsm,c11/yatsm
|
Add ROI extraction code for use w/ xarray
|
""" Utilities for extracting training data from region of interests (ROI)
"""
import numpy as np
from rasterio.features import rasterize
from shapely.geometry import shape as geom_shape
def extract_roi(src, features, feature_prop=None, all_touched=False, fill=0):
""" Yield pixel data from ``src`` for ROIs in ``features``
Args:
src (rasterio.RasterReader): The ``rasterio`` dataset used to extract
training data values from
features (list[dict]): A list of features from a polygon vector file
given in the format used by fiona
feature_prop (str): The name of the attribute from ``features``
containing the ROI labels
all_touched (bool): Rasterization option that decides if all pixels
touching the ROI should be included, or just pixels from within
the ROI
fill (int, float): A fill value for the ROI rasterization. This fill
value should not be the same as any of the labels in the set of
ROIs
Returns:
tuple (np.ndarray, np.ndarray, np.ndarray, np.ndarray): A tuple
containing an array of ROI data from ``src`` (``band x n``), the
ROI data label (``n``), and the X and Y coordinates of each data
point (``n`` and ``n`` sized)
"""
if not feature_prop:
feature_prop = list(features[0]['properties'].keys())[0]
for feat, label in features:
geom = geom_shape(feat)
bounds = tuple(geom.bounds)
window = src.window(*bounds, boundless=True)
data = src.read(window=window, boundless=True)
shape = data.shape
transform = src.window_transform(window)
roi = rasterize(
[feat],
out_shape=shape[1:],
transform=transform,
fill=fill,
all_touched=all_touched
)
mask = np.logical_or(
(data == src.nodata).any(axis=0),
roi == fill
)
masked = np.ma.MaskedArray(
data,
mask=np.ones_like(data) * mask
)
ys, xs = np.where(~mask)
coord_xs, coord_ys = transform * (xs, ys)
masked = masked.compressed()
npix = masked.size / shape[0]
masked = masked.reshape((shape[0], npix))
label = label * np.ones(coord_ys.size, dtype=np.uint8)
yield (masked, label, coord_xs, coord_ys, )
|
<commit_before><commit_msg>Add ROI extraction code for use w/ xarray<commit_after>
|
""" Utilities for extracting training data from region of interests (ROI)
"""
import numpy as np
from rasterio.features import rasterize
from shapely.geometry import shape as geom_shape
def extract_roi(src, features, feature_prop=None, all_touched=False, fill=0):
""" Yield pixel data from ``src`` for ROIs in ``features``
Args:
src (rasterio.RasterReader): The ``rasterio`` dataset used to extract
training data values from
features (list[dict]): A list of features from a polygon vector file
given in the format used by fiona
feature_prop (str): The name of the attribute from ``features``
containing the ROI labels
all_touched (bool): Rasterization option that decides if all pixels
touching the ROI should be included, or just pixels from within
the ROI
fill (int, float): A fill value for the ROI rasterization. This fill
value should not be the same as any of the labels in the set of
ROIs
Returns:
tuple (np.ndarray, np.ndarray, np.ndarray, np.ndarray): A tuple
containing an array of ROI data from ``src`` (``band x n``), the
ROI data label (``n``), and the X and Y coordinates of each data
point (``n`` and ``n`` sized)
"""
if not feature_prop:
feature_prop = list(features[0]['properties'].keys())[0]
for feat, label in features:
geom = geom_shape(feat)
bounds = tuple(geom.bounds)
window = src.window(*bounds, boundless=True)
data = src.read(window=window, boundless=True)
shape = data.shape
transform = src.window_transform(window)
roi = rasterize(
[feat],
out_shape=shape[1:],
transform=transform,
fill=fill,
all_touched=all_touched
)
mask = np.logical_or(
(data == src.nodata).any(axis=0),
roi == fill
)
masked = np.ma.MaskedArray(
data,
mask=np.ones_like(data) * mask
)
ys, xs = np.where(~mask)
coord_xs, coord_ys = transform * (xs, ys)
masked = masked.compressed()
npix = masked.size / shape[0]
masked = masked.reshape((shape[0], npix))
label = label * np.ones(coord_ys.size, dtype=np.uint8)
yield (masked, label, coord_xs, coord_ys, )
|
Add ROI extraction code for use w/ xarray""" Utilities for extracting training data from region of interests (ROI)
"""
import numpy as np
from rasterio.features import rasterize
from shapely.geometry import shape as geom_shape
def extract_roi(src, features, feature_prop=None, all_touched=False, fill=0):
""" Yield pixel data from ``src`` for ROIs in ``features``
Args:
src (rasterio.RasterReader): The ``rasterio`` dataset used to extract
training data values from
features (list[dict]): A list of features from a polygon vector file
given in the format used by fiona
feature_prop (str): The name of the attribute from ``features``
containing the ROI labels
all_touched (bool): Rasterization option that decides if all pixels
touching the ROI should be included, or just pixels from within
the ROI
fill (int, float): A fill value for the ROI rasterization. This fill
value should not be the same as any of the labels in the set of
ROIs
Returns:
tuple (np.ndarray, np.ndarray, np.ndarray, np.ndarray): A tuple
containing an array of ROI data from ``src`` (``band x n``), the
ROI data label (``n``), and the X and Y coordinates of each data
point (``n`` and ``n`` sized)
"""
if not feature_prop:
feature_prop = list(features[0]['properties'].keys())[0]
for feat, label in features:
geom = geom_shape(feat)
bounds = tuple(geom.bounds)
window = src.window(*bounds, boundless=True)
data = src.read(window=window, boundless=True)
shape = data.shape
transform = src.window_transform(window)
roi = rasterize(
[feat],
out_shape=shape[1:],
transform=transform,
fill=fill,
all_touched=all_touched
)
mask = np.logical_or(
(data == src.nodata).any(axis=0),
roi == fill
)
masked = np.ma.MaskedArray(
data,
mask=np.ones_like(data) * mask
)
ys, xs = np.where(~mask)
coord_xs, coord_ys = transform * (xs, ys)
masked = masked.compressed()
npix = masked.size / shape[0]
masked = masked.reshape((shape[0], npix))
label = label * np.ones(coord_ys.size, dtype=np.uint8)
yield (masked, label, coord_xs, coord_ys, )
|
<commit_before><commit_msg>Add ROI extraction code for use w/ xarray<commit_after>""" Utilities for extracting training data from region of interests (ROI)
"""
import numpy as np
from rasterio.features import rasterize
from shapely.geometry import shape as geom_shape
def extract_roi(src, features, feature_prop=None, all_touched=False, fill=0):
""" Yield pixel data from ``src`` for ROIs in ``features``
Args:
src (rasterio.RasterReader): The ``rasterio`` dataset used to extract
training data values from
features (list[dict]): A list of features from a polygon vector file
given in the format used by fiona
feature_prop (str): The name of the attribute from ``features``
containing the ROI labels
all_touched (bool): Rasterization option that decides if all pixels
touching the ROI should be included, or just pixels from within
the ROI
fill (int, float): A fill value for the ROI rasterization. This fill
value should not be the same as any of the labels in the set of
ROIs
Returns:
tuple (np.ndarray, np.ndarray, np.ndarray, np.ndarray): A tuple
containing an array of ROI data from ``src`` (``band x n``), the
ROI data label (``n``), and the X and Y coordinates of each data
point (``n`` and ``n`` sized)
"""
if not feature_prop:
feature_prop = list(features[0]['properties'].keys())[0]
for feat, label in features:
geom = geom_shape(feat)
bounds = tuple(geom.bounds)
window = src.window(*bounds, boundless=True)
data = src.read(window=window, boundless=True)
shape = data.shape
transform = src.window_transform(window)
roi = rasterize(
[feat],
out_shape=shape[1:],
transform=transform,
fill=fill,
all_touched=all_touched
)
mask = np.logical_or(
(data == src.nodata).any(axis=0),
roi == fill
)
masked = np.ma.MaskedArray(
data,
mask=np.ones_like(data) * mask
)
ys, xs = np.where(~mask)
coord_xs, coord_ys = transform * (xs, ys)
masked = masked.compressed()
npix = masked.size / shape[0]
masked = masked.reshape((shape[0], npix))
label = label * np.ones(coord_ys.size, dtype=np.uint8)
yield (masked, label, coord_xs, coord_ys, )
|
|
f631b42aee7e144947236bcd55218cf0252e6ec8
|
h2o-py/tests/testdir_algos/gam/pyunit_gam_train_metrics.py
|
h2o-py/tests/testdir_algos/gam/pyunit_gam_train_metrics.py
|
from __future__ import division
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
import pandas as pd
import numpy as np
from tests import pyunit_utils
def gam_train_metrics_recalculate(family):
np.random.seed(1234)
n_rows = 1000
data = {
"X1": np.random.randn(n_rows),
"X2": np.random.randn(n_rows),
"X3": np.random.randn(n_rows),
"W": np.random.choice([10, 20], size=n_rows),
"Y": np.random.choice([0, 0, 0, 0, 0, 10, 20, 30], size=n_rows) + 0.1
}
train = h2o.H2OFrame(pd.DataFrame(data))
test = train.drop("W")
print(train)
h2o_model = H2OGeneralizedAdditiveEstimator(family=family,
gam_columns=["X3"],
weights_column="W",
lambda_=0,
tweedie_variance_power=1.5,
tweedie_link_power=0)
h2o_model.train(x=["X1", "X2"], y="Y", training_frame=train)
# force H2O to recalculate metrics instead just taking them from metrics cache
train_clone = h2o.H2OFrame(pd.DataFrame(data))
print("GAM performance with test_data=train: {0}, with test_data=test: {1} and train=True: "
"{2}".format(h2o_model.model_performance(test_data=train)._metric_json["MSE"],
h2o_model.model_performance(test_data=test)._metric_json["MSE"],
h2o_model.model_performance(train=True)._metric_json["MSE"]))
assert abs(h2o_model.model_performance(test_data=train_clone)._metric_json["MSE"] - h2o_model.model_performance(train=True)._metric_json["MSE"]) < 1e-6
def gam_train_metrics_recalculate_poisson():
gam_train_metrics_recalculate("poisson")
def gam_train_metrics_recalculate_tweedie():
gam_train_metrics_recalculate("tweedie")
def gam_train_metrics_recalculate_gamma():
gam_train_metrics_recalculate("gamma")
def gam_train_metrics_recalculate_gaussian():
gam_train_metrics_recalculate("gaussian")
pyunit_utils.run_tests([
gam_train_metrics_recalculate_poisson,
gam_train_metrics_recalculate_tweedie,
gam_train_metrics_recalculate_gamma,
gam_train_metrics_recalculate_gaussian
])
|
Add test to show GAM metrics are correct
|
PUBDEV-8455: Add test to show GAM metrics are correct
We compare training metrics calculated in training and user-calculated
metrics.
|
Python
|
apache-2.0
|
h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-3
|
PUBDEV-8455: Add test to show GAM metrics are correct
We compare training metrics calculated in training and user-calculated
metrics.
|
from __future__ import division
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
import pandas as pd
import numpy as np
from tests import pyunit_utils
def gam_train_metrics_recalculate(family):
np.random.seed(1234)
n_rows = 1000
data = {
"X1": np.random.randn(n_rows),
"X2": np.random.randn(n_rows),
"X3": np.random.randn(n_rows),
"W": np.random.choice([10, 20], size=n_rows),
"Y": np.random.choice([0, 0, 0, 0, 0, 10, 20, 30], size=n_rows) + 0.1
}
train = h2o.H2OFrame(pd.DataFrame(data))
test = train.drop("W")
print(train)
h2o_model = H2OGeneralizedAdditiveEstimator(family=family,
gam_columns=["X3"],
weights_column="W",
lambda_=0,
tweedie_variance_power=1.5,
tweedie_link_power=0)
h2o_model.train(x=["X1", "X2"], y="Y", training_frame=train)
# force H2O to recalculate metrics instead just taking them from metrics cache
train_clone = h2o.H2OFrame(pd.DataFrame(data))
print("GAM performance with test_data=train: {0}, with test_data=test: {1} and train=True: "
"{2}".format(h2o_model.model_performance(test_data=train)._metric_json["MSE"],
h2o_model.model_performance(test_data=test)._metric_json["MSE"],
h2o_model.model_performance(train=True)._metric_json["MSE"]))
assert abs(h2o_model.model_performance(test_data=train_clone)._metric_json["MSE"] - h2o_model.model_performance(train=True)._metric_json["MSE"]) < 1e-6
def gam_train_metrics_recalculate_poisson():
gam_train_metrics_recalculate("poisson")
def gam_train_metrics_recalculate_tweedie():
gam_train_metrics_recalculate("tweedie")
def gam_train_metrics_recalculate_gamma():
gam_train_metrics_recalculate("gamma")
def gam_train_metrics_recalculate_gaussian():
gam_train_metrics_recalculate("gaussian")
pyunit_utils.run_tests([
gam_train_metrics_recalculate_poisson,
gam_train_metrics_recalculate_tweedie,
gam_train_metrics_recalculate_gamma,
gam_train_metrics_recalculate_gaussian
])
|
<commit_before><commit_msg>PUBDEV-8455: Add test to show GAM metrics are correct
We compare training metrics calculated in training and user-calculated
metrics.<commit_after>
|
from __future__ import division
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
import pandas as pd
import numpy as np
from tests import pyunit_utils
def gam_train_metrics_recalculate(family):
np.random.seed(1234)
n_rows = 1000
data = {
"X1": np.random.randn(n_rows),
"X2": np.random.randn(n_rows),
"X3": np.random.randn(n_rows),
"W": np.random.choice([10, 20], size=n_rows),
"Y": np.random.choice([0, 0, 0, 0, 0, 10, 20, 30], size=n_rows) + 0.1
}
train = h2o.H2OFrame(pd.DataFrame(data))
test = train.drop("W")
print(train)
h2o_model = H2OGeneralizedAdditiveEstimator(family=family,
gam_columns=["X3"],
weights_column="W",
lambda_=0,
tweedie_variance_power=1.5,
tweedie_link_power=0)
h2o_model.train(x=["X1", "X2"], y="Y", training_frame=train)
# force H2O to recalculate metrics instead just taking them from metrics cache
train_clone = h2o.H2OFrame(pd.DataFrame(data))
print("GAM performance with test_data=train: {0}, with test_data=test: {1} and train=True: "
"{2}".format(h2o_model.model_performance(test_data=train)._metric_json["MSE"],
h2o_model.model_performance(test_data=test)._metric_json["MSE"],
h2o_model.model_performance(train=True)._metric_json["MSE"]))
assert abs(h2o_model.model_performance(test_data=train_clone)._metric_json["MSE"] - h2o_model.model_performance(train=True)._metric_json["MSE"]) < 1e-6
def gam_train_metrics_recalculate_poisson():
gam_train_metrics_recalculate("poisson")
def gam_train_metrics_recalculate_tweedie():
gam_train_metrics_recalculate("tweedie")
def gam_train_metrics_recalculate_gamma():
gam_train_metrics_recalculate("gamma")
def gam_train_metrics_recalculate_gaussian():
gam_train_metrics_recalculate("gaussian")
pyunit_utils.run_tests([
gam_train_metrics_recalculate_poisson,
gam_train_metrics_recalculate_tweedie,
gam_train_metrics_recalculate_gamma,
gam_train_metrics_recalculate_gaussian
])
|
PUBDEV-8455: Add test to show GAM metrics are correct
We compare training metrics calculated in training and user-calculated
metrics.from __future__ import division
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
import pandas as pd
import numpy as np
from tests import pyunit_utils
def gam_train_metrics_recalculate(family):
np.random.seed(1234)
n_rows = 1000
data = {
"X1": np.random.randn(n_rows),
"X2": np.random.randn(n_rows),
"X3": np.random.randn(n_rows),
"W": np.random.choice([10, 20], size=n_rows),
"Y": np.random.choice([0, 0, 0, 0, 0, 10, 20, 30], size=n_rows) + 0.1
}
train = h2o.H2OFrame(pd.DataFrame(data))
test = train.drop("W")
print(train)
h2o_model = H2OGeneralizedAdditiveEstimator(family=family,
gam_columns=["X3"],
weights_column="W",
lambda_=0,
tweedie_variance_power=1.5,
tweedie_link_power=0)
h2o_model.train(x=["X1", "X2"], y="Y", training_frame=train)
# force H2O to recalculate metrics instead just taking them from metrics cache
train_clone = h2o.H2OFrame(pd.DataFrame(data))
print("GAM performance with test_data=train: {0}, with test_data=test: {1} and train=True: "
"{2}".format(h2o_model.model_performance(test_data=train)._metric_json["MSE"],
h2o_model.model_performance(test_data=test)._metric_json["MSE"],
h2o_model.model_performance(train=True)._metric_json["MSE"]))
assert abs(h2o_model.model_performance(test_data=train_clone)._metric_json["MSE"] - h2o_model.model_performance(train=True)._metric_json["MSE"]) < 1e-6
def gam_train_metrics_recalculate_poisson():
gam_train_metrics_recalculate("poisson")
def gam_train_metrics_recalculate_tweedie():
gam_train_metrics_recalculate("tweedie")
def gam_train_metrics_recalculate_gamma():
gam_train_metrics_recalculate("gamma")
def gam_train_metrics_recalculate_gaussian():
gam_train_metrics_recalculate("gaussian")
pyunit_utils.run_tests([
gam_train_metrics_recalculate_poisson,
gam_train_metrics_recalculate_tweedie,
gam_train_metrics_recalculate_gamma,
gam_train_metrics_recalculate_gaussian
])
|
<commit_before><commit_msg>PUBDEV-8455: Add test to show GAM metrics are correct
We compare training metrics calculated in training and user-calculated
metrics.<commit_after>from __future__ import division
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
import pandas as pd
import numpy as np
from tests import pyunit_utils
def gam_train_metrics_recalculate(family):
np.random.seed(1234)
n_rows = 1000
data = {
"X1": np.random.randn(n_rows),
"X2": np.random.randn(n_rows),
"X3": np.random.randn(n_rows),
"W": np.random.choice([10, 20], size=n_rows),
"Y": np.random.choice([0, 0, 0, 0, 0, 10, 20, 30], size=n_rows) + 0.1
}
train = h2o.H2OFrame(pd.DataFrame(data))
test = train.drop("W")
print(train)
h2o_model = H2OGeneralizedAdditiveEstimator(family=family,
gam_columns=["X3"],
weights_column="W",
lambda_=0,
tweedie_variance_power=1.5,
tweedie_link_power=0)
h2o_model.train(x=["X1", "X2"], y="Y", training_frame=train)
# force H2O to recalculate metrics instead just taking them from metrics cache
train_clone = h2o.H2OFrame(pd.DataFrame(data))
print("GAM performance with test_data=train: {0}, with test_data=test: {1} and train=True: "
"{2}".format(h2o_model.model_performance(test_data=train)._metric_json["MSE"],
h2o_model.model_performance(test_data=test)._metric_json["MSE"],
h2o_model.model_performance(train=True)._metric_json["MSE"]))
assert abs(h2o_model.model_performance(test_data=train_clone)._metric_json["MSE"] - h2o_model.model_performance(train=True)._metric_json["MSE"]) < 1e-6
def gam_train_metrics_recalculate_poisson():
gam_train_metrics_recalculate("poisson")
def gam_train_metrics_recalculate_tweedie():
gam_train_metrics_recalculate("tweedie")
def gam_train_metrics_recalculate_gamma():
gam_train_metrics_recalculate("gamma")
def gam_train_metrics_recalculate_gaussian():
gam_train_metrics_recalculate("gaussian")
pyunit_utils.run_tests([
gam_train_metrics_recalculate_poisson,
gam_train_metrics_recalculate_tweedie,
gam_train_metrics_recalculate_gamma,
gam_train_metrics_recalculate_gaussian
])
|
|
d21d5136351d92a384ea7d5b7db5224ec648ad0f
|
src/ggrc_risks/migrations/versions/20160513135106_47bf3f1f9be8_add_url_and_reference_url_columns.py
|
src/ggrc_risks/migrations/versions/20160513135106_47bf3f1f9be8_add_url_and_reference_url_columns.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: urban@reciprocitylabs.com
# Maintained By: urban@reciprocitylabs.com
"""
Add url and reference_url columns
Create Date: 2016-05-13 13:51:06.534663
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '47bf3f1f9be8'
down_revision = '17ae137bda7a'
def upgrade():
"""Add url and reference_url columns"""
op.add_column("risks",
sa.Column("url", sa.String(length=250),
nullable=True))
op.add_column("risks",
sa.Column("reference_url", sa.String(length=250),
nullable=True))
def downgrade():
op.drop_column("risks", "url")
op.drop_column("risks", "reference_url")
|
Add `url` and `reference_url` columns to risk object
|
Add `url` and `reference_url` columns to risk object
|
Python
|
apache-2.0
|
andrei-karalionak/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,edofic/ggrc-core
|
Add `url` and `reference_url` columns to risk object
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: urban@reciprocitylabs.com
# Maintained By: urban@reciprocitylabs.com
"""
Add url and reference_url columns
Create Date: 2016-05-13 13:51:06.534663
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '47bf3f1f9be8'
down_revision = '17ae137bda7a'
def upgrade():
"""Add url and reference_url columns"""
op.add_column("risks",
sa.Column("url", sa.String(length=250),
nullable=True))
op.add_column("risks",
sa.Column("reference_url", sa.String(length=250),
nullable=True))
def downgrade():
op.drop_column("risks", "url")
op.drop_column("risks", "reference_url")
|
<commit_before><commit_msg>Add `url` and `reference_url` columns to risk object<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: urban@reciprocitylabs.com
# Maintained By: urban@reciprocitylabs.com
"""
Add url and reference_url columns
Create Date: 2016-05-13 13:51:06.534663
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '47bf3f1f9be8'
down_revision = '17ae137bda7a'
def upgrade():
"""Add url and reference_url columns"""
op.add_column("risks",
sa.Column("url", sa.String(length=250),
nullable=True))
op.add_column("risks",
sa.Column("reference_url", sa.String(length=250),
nullable=True))
def downgrade():
op.drop_column("risks", "url")
op.drop_column("risks", "reference_url")
|
Add `url` and `reference_url` columns to risk object# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: urban@reciprocitylabs.com
# Maintained By: urban@reciprocitylabs.com
"""
Add url and reference_url columns
Create Date: 2016-05-13 13:51:06.534663
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '47bf3f1f9be8'
down_revision = '17ae137bda7a'
def upgrade():
"""Add url and reference_url columns"""
op.add_column("risks",
sa.Column("url", sa.String(length=250),
nullable=True))
op.add_column("risks",
sa.Column("reference_url", sa.String(length=250),
nullable=True))
def downgrade():
op.drop_column("risks", "url")
op.drop_column("risks", "reference_url")
|
<commit_before><commit_msg>Add `url` and `reference_url` columns to risk object<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: urban@reciprocitylabs.com
# Maintained By: urban@reciprocitylabs.com
"""
Add url and reference_url columns
Create Date: 2016-05-13 13:51:06.534663
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '47bf3f1f9be8'
down_revision = '17ae137bda7a'
def upgrade():
"""Add url and reference_url columns"""
op.add_column("risks",
sa.Column("url", sa.String(length=250),
nullable=True))
op.add_column("risks",
sa.Column("reference_url", sa.String(length=250),
nullable=True))
def downgrade():
op.drop_column("risks", "url")
op.drop_column("risks", "reference_url")
|
|
1922197472b5afcacdae37da2c4e66856e74116e
|
storage/mongo_storage.py
|
storage/mongo_storage.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/
'''
Storage module to interact with MongoDB.
Provides a MongoStorage helper class with the following
functions:
setup: initialize the Mongo client connection
store: takes in a report dictionary and posts it to
mongo instance. Returns a list of report id's.
get_report: Given a report_id (a sha256 hash), return
the report.
delete: Given a report_id (a sha256 hash), delete the
specified report.
'''
from storage import Storage
from uuid import uuid4
from pymongo import MongoClient
class MongoStorage(Storage):
'''
Subclass of Storage. Allows user to interact
with backend Mongo database
'''
DEFAULTCONF = {
'ENABLED': False,
'host': 'localhost',
'port': 27017,
'database': 'multiscanner_reports',
'collection': 'reports',
}
def setup(self):
self.host = self.config['host']
self.port = self.config['port']
self.client = MongoClient(
host=self.host,
port=self.port
)
self.database = getattr(self.client, self.config['database'])
self.collection = getattr(self.database, self.config['collection'])
return True
def store(self, report):
report_id_list = []
for filename in report:
report[filename]['filename'] = filename
try:
report_id = report[filename]['SHA256']
except KeyError:
report_id = uuid4()
report_id_list.append(report_id)
self.collection.update(
{'_id': report_id},
report[filename],
True
)
return report_id_list
def get_report(self, report_id):
result = self.collection.find({'_id': report_id})
if result.count == 0:
return json.dumps({})
return json.dumps(result[0])
def delete(self, report_id):
result = self.collection.delete_one({'_id': report_id})
if result.deleted_count == 0:
return False
return True
|
Add storage module for mongo DB
|
Add storage module for mongo DB
|
Python
|
mpl-2.0
|
awest1339/multiscanner,mitre/multiscanner,MITRECND/multiscanner,awest1339/multiscanner,jmlong1027/multiscanner,mitre/multiscanner,jmlong1027/multiscanner,awest1339/multiscanner,MITRECND/multiscanner,jmlong1027/multiscanner,jmlong1027/multiscanner,mitre/multiscanner,awest1339/multiscanner
|
Add storage module for mongo DB
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/
'''
Storage module to interact with MongoDB.
Provides a MongoStorage helper class with the following
functions:
setup: initialize the Mongo client connection
store: takes in a report dictionary and posts it to
mongo instance. Returns a list of report id's.
get_report: Given a report_id (a sha256 hash), return
the report.
delete: Given a report_id (a sha256 hash), delete the
specified report.
'''
from storage import Storage
from uuid import uuid4
from pymongo import MongoClient
class MongoStorage(Storage):
'''
Subclass of Storage. Allows user to interact
with backend Mongo database
'''
DEFAULTCONF = {
'ENABLED': False,
'host': 'localhost',
'port': 27017,
'database': 'multiscanner_reports',
'collection': 'reports',
}
def setup(self):
self.host = self.config['host']
self.port = self.config['port']
self.client = MongoClient(
host=self.host,
port=self.port
)
self.database = getattr(self.client, self.config['database'])
self.collection = getattr(self.database, self.config['collection'])
return True
def store(self, report):
report_id_list = []
for filename in report:
report[filename]['filename'] = filename
try:
report_id = report[filename]['SHA256']
except KeyError:
report_id = uuid4()
report_id_list.append(report_id)
self.collection.update(
{'_id': report_id},
report[filename],
True
)
return report_id_list
def get_report(self, report_id):
result = self.collection.find({'_id': report_id})
if result.count == 0:
return json.dumps({})
return json.dumps(result[0])
def delete(self, report_id):
result = self.collection.delete_one({'_id': report_id})
if result.deleted_count == 0:
return False
return True
|
<commit_before><commit_msg>Add storage module for mongo DB<commit_after>
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/
'''
Storage module to interact with MongoDB.
Provides a MongoStorage helper class with the following
functions:
setup: initialize the Mongo client connection
store: takes in a report dictionary and posts it to
mongo instance. Returns a list of report id's.
get_report: Given a report_id (a sha256 hash), return
the report.
delete: Given a report_id (a sha256 hash), delete the
specified report.
'''
from storage import Storage
from uuid import uuid4
from pymongo import MongoClient
class MongoStorage(Storage):
'''
Subclass of Storage. Allows user to interact
with backend Mongo database
'''
DEFAULTCONF = {
'ENABLED': False,
'host': 'localhost',
'port': 27017,
'database': 'multiscanner_reports',
'collection': 'reports',
}
def setup(self):
self.host = self.config['host']
self.port = self.config['port']
self.client = MongoClient(
host=self.host,
port=self.port
)
self.database = getattr(self.client, self.config['database'])
self.collection = getattr(self.database, self.config['collection'])
return True
def store(self, report):
report_id_list = []
for filename in report:
report[filename]['filename'] = filename
try:
report_id = report[filename]['SHA256']
except KeyError:
report_id = uuid4()
report_id_list.append(report_id)
self.collection.update(
{'_id': report_id},
report[filename],
True
)
return report_id_list
def get_report(self, report_id):
result = self.collection.find({'_id': report_id})
if result.count == 0:
return json.dumps({})
return json.dumps(result[0])
def delete(self, report_id):
result = self.collection.delete_one({'_id': report_id})
if result.deleted_count == 0:
return False
return True
|
Add storage module for mongo DB# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/
'''
Storage module to interact with MongoDB.
Provides a MongoStorage helper class with the following
functions:
setup: initialize the Mongo client connection
store: takes in a report dictionary and posts it to
mongo instance. Returns a list of report id's.
get_report: Given a report_id (a sha256 hash), return
the report.
delete: Given a report_id (a sha256 hash), delete the
specified report.
'''
from storage import Storage
from uuid import uuid4
from pymongo import MongoClient
class MongoStorage(Storage):
'''
Subclass of Storage. Allows user to interact
with backend Mongo database
'''
DEFAULTCONF = {
'ENABLED': False,
'host': 'localhost',
'port': 27017,
'database': 'multiscanner_reports',
'collection': 'reports',
}
def setup(self):
self.host = self.config['host']
self.port = self.config['port']
self.client = MongoClient(
host=self.host,
port=self.port
)
self.database = getattr(self.client, self.config['database'])
self.collection = getattr(self.database, self.config['collection'])
return True
def store(self, report):
report_id_list = []
for filename in report:
report[filename]['filename'] = filename
try:
report_id = report[filename]['SHA256']
except KeyError:
report_id = uuid4()
report_id_list.append(report_id)
self.collection.update(
{'_id': report_id},
report[filename],
True
)
return report_id_list
def get_report(self, report_id):
result = self.collection.find({'_id': report_id})
        if result.count() == 0:
return json.dumps({})
return json.dumps(result[0])
def delete(self, report_id):
result = self.collection.delete_one({'_id': report_id})
if result.deleted_count == 0:
return False
return True
|
<commit_before><commit_msg>Add storage module for mongo DB<commit_after># This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/
'''
Storage module to interact with MongoDB.
Provides a MongoStorage helper class with the following
functions:
setup: initialize the Mongo client connection
store: takes in a report dictionary and posts it to
mongo instance. Returns a list of report id's.
get_report: Given a report_id (a sha256 hash), return
the report.
delete: Given a report_id (a sha256 hash), delete the
specified report.
'''
import json
from storage import Storage
from uuid import uuid4
from pymongo import MongoClient
class MongoStorage(Storage):
'''
Subclass of Storage. Allows user to interact
with backend Mongo database
'''
DEFAULTCONF = {
'ENABLED': False,
'host': 'localhost',
'port': 27017,
'database': 'multiscanner_reports',
'collection': 'reports',
}
def setup(self):
self.host = self.config['host']
self.port = self.config['port']
self.client = MongoClient(
host=self.host,
port=self.port
)
self.database = getattr(self.client, self.config['database'])
self.collection = getattr(self.database, self.config['collection'])
return True
def store(self, report):
report_id_list = []
for filename in report:
report[filename]['filename'] = filename
try:
report_id = report[filename]['SHA256']
except KeyError:
report_id = uuid4()
report_id_list.append(report_id)
self.collection.update(
{'_id': report_id},
report[filename],
True
)
return report_id_list
def get_report(self, report_id):
result = self.collection.find({'_id': report_id})
        if result.count() == 0:
return json.dumps({})
return json.dumps(result[0])
def delete(self, report_id):
result = self.collection.delete_one({'_id': report_id})
if result.deleted_count == 0:
return False
return True
|
|
c050cbd0f13f34915854137dced4003b8836f451
|
scripts/image_signing/security_test_artifact.py
|
scripts/image_signing/security_test_artifact.py
|
#!/usr/bin/env python3
# Copyright 2022 The ChromiumOS Authors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run security tests on an artifact"""
import argparse
import os
from pathlib import Path
import subprocess
import sys
DIR = Path(__file__).resolve().parent
def exec_test(name, input, args):
"""Runs a given script
Args:
name: the name of the script to execute
input: the input artifact
args: list of additional arguments for the script
"""
# Ensure this script can execute from any directory
cmd_path = DIR / f"{name}.sh"
cmd = [cmd_path, input] + args
ret = subprocess.run(cmd, check=False)
if ret.returncode:
sys.exit(ret.returncode)
def get_parser():
"""Creates an argument parser"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--config",
"-c",
help="Security test baseline config directory",
required=True,
type=Path,
)
parser.add_argument(
"--input",
"-i",
help="Artfact to test",
required=True,
type=Path,
)
parser.add_argument(
"--keyset-is-mp",
action="store_true",
help="Target artifact is signed with a mass production keyset",
default=False,
)
return parser
def main(argv):
"""Main function, parses arguments and invokes the relevant scripts"""
parser = get_parser()
opts = parser.parse_args(argv)
# Run generic baseline tests.
baseline_tests = [
"ensure_sane_lsb-release",
]
if opts.keyset_is_mp:
baseline_tests += [
"ensure_no_nonrelease_files",
"ensure_secure_kernelparams",
]
for test in baseline_tests:
exec_test(
test, opts.input, [os.path.join(opts.config, f"{test}.config")]
)
# Run generic non-baseline tests.
tests = []
if opts.keyset_is_mp:
tests += [
"ensure_not_ASAN",
"ensure_not_tainted_license",
"ensure_update_verification",
]
for test in tests:
exec_test(test, opts.input, [])
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
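For reference, a hedged sketch of driving the script programmatically; the config directory and image path are made-up placeholders, and importing the file as a module named security_test_artifact is an assumption.
# Equivalent to: security_test_artifact.py -c <baselines> -i <image> --keyset-is-mp
import security_test_artifact
argv = [
    "--config", "/path/to/security_test_baselines",
    "--input", "/path/to/chromiumos_image.bin",
    "--keyset-is-mp",
]
security_test_artifact.main(argv)  # exec_test() calls sys.exit() on the first failing check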
|
Add general security test script
|
scripts/image_signing: Add general security test script
This commit adds a general security test script. This allows
some logic to be moved out of the signer as well as providing a single
entry point for the security tests run by the signer.
BRANCH=none
BUG=b:202397678
TEST=Verified that correct security tests ran with/without
`--keyset-is-mp`
Change-Id: Ib4c779a90d2fe9160c278f20d7ec61242f1d68cc
Signed-off-by: Robert Zieba <33e0ad3807fbfa18c105b3fe38a4090f81988052@google.com>
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/vboot_reference/+/3820999
Reviewed-by: Mike Frysinger <8f3f75c74bd5184edcfa6534cab3c13a00a2f794@chromium.org>
Commit-Queue: Mike Frysinger <8f3f75c74bd5184edcfa6534cab3c13a00a2f794@chromium.org>
|
Python
|
bsd-3-clause
|
coreboot/vboot,coreboot/vboot,coreboot/vboot,coreboot/vboot,coreboot/vboot
|
scripts/image_signing: Add general security test script
This commit adds a general security test script. This allows
some logic to be moved out of the signer as well as providing a single
entry point for the security tests run by the signer.
BRANCH=none
BUG=b:202397678
TEST=Verified that correct security tests ran with/without
`--keyset-is-mp`
Change-Id: Ib4c779a90d2fe9160c278f20d7ec61242f1d68cc
Signed-off-by: Robert Zieba <33e0ad3807fbfa18c105b3fe38a4090f81988052@google.com>
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/vboot_reference/+/3820999
Reviewed-by: Mike Frysinger <8f3f75c74bd5184edcfa6534cab3c13a00a2f794@chromium.org>
Commit-Queue: Mike Frysinger <8f3f75c74bd5184edcfa6534cab3c13a00a2f794@chromium.org>
|
#!/usr/bin/env python3
# Copyright 2022 The ChromiumOS Authors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run security tests on an artifact"""
import argparse
import os
from pathlib import Path
import subprocess
import sys
DIR = Path(__file__).resolve().parent
def exec_test(name, input, args):
"""Runs a given script
Args:
name: the name of the script to execute
input: the input artifact
args: list of additional arguments for the script
"""
# Ensure this script can execute from any directory
cmd_path = DIR / f"{name}.sh"
cmd = [cmd_path, input] + args
ret = subprocess.run(cmd, check=False)
if ret.returncode:
sys.exit(ret.returncode)
def get_parser():
"""Creates an argument parser"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--config",
"-c",
help="Security test baseline config directory",
required=True,
type=Path,
)
parser.add_argument(
"--input",
"-i",
help="Artfact to test",
required=True,
type=Path,
)
parser.add_argument(
"--keyset-is-mp",
action="store_true",
help="Target artifact is signed with a mass production keyset",
default=False,
)
return parser
def main(argv):
"""Main function, parses arguments and invokes the relevant scripts"""
parser = get_parser()
opts = parser.parse_args(argv)
# Run generic baseline tests.
baseline_tests = [
"ensure_sane_lsb-release",
]
if opts.keyset_is_mp:
baseline_tests += [
"ensure_no_nonrelease_files",
"ensure_secure_kernelparams",
]
for test in baseline_tests:
exec_test(
test, opts.input, [os.path.join(opts.config, f"{test}.config")]
)
# Run generic non-baseline tests.
tests = []
if opts.keyset_is_mp:
tests += [
"ensure_not_ASAN",
"ensure_not_tainted_license",
"ensure_update_verification",
]
for test in tests:
exec_test(test, opts.input, [])
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>scripts/image_signing: Add general security test script
This commit adds a general security test script. This allows
some logic to be moved out of the signer as well as providing a single
entry point for the security tests run by the signer.
BRANCH=none
BUG=b:202397678
TEST=Verified that correct security tests ran with/without
`--keyset-is-mp`
Change-Id: Ib4c779a90d2fe9160c278f20d7ec61242f1d68cc
Signed-off-by: Robert Zieba <33e0ad3807fbfa18c105b3fe38a4090f81988052@google.com>
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/vboot_reference/+/3820999
Reviewed-by: Mike Frysinger <8f3f75c74bd5184edcfa6534cab3c13a00a2f794@chromium.org>
Commit-Queue: Mike Frysinger <8f3f75c74bd5184edcfa6534cab3c13a00a2f794@chromium.org><commit_after>
|
#!/usr/bin/env python3
# Copyright 2022 The ChromiumOS Authors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run security tests on an artifact"""
import argparse
import os
from pathlib import Path
import subprocess
import sys
DIR = Path(__file__).resolve().parent
def exec_test(name, input, args):
"""Runs a given script
Args:
name: the name of the script to execute
input: the input artifact
args: list of additional arguments for the script
"""
# Ensure this script can execute from any directory
cmd_path = DIR / f"{name}.sh"
cmd = [cmd_path, input] + args
ret = subprocess.run(cmd, check=False)
if ret.returncode:
sys.exit(ret.returncode)
def get_parser():
"""Creates an argument parser"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--config",
"-c",
help="Security test baseline config directory",
required=True,
type=Path,
)
parser.add_argument(
"--input",
"-i",
help="Artfact to test",
required=True,
type=Path,
)
parser.add_argument(
"--keyset-is-mp",
action="store_true",
help="Target artifact is signed with a mass production keyset",
default=False,
)
return parser
def main(argv):
"""Main function, parses arguments and invokes the relevant scripts"""
parser = get_parser()
opts = parser.parse_args(argv)
# Run generic baseline tests.
baseline_tests = [
"ensure_sane_lsb-release",
]
if opts.keyset_is_mp:
baseline_tests += [
"ensure_no_nonrelease_files",
"ensure_secure_kernelparams",
]
for test in baseline_tests:
exec_test(
test, opts.input, [os.path.join(opts.config, f"{test}.config")]
)
# Run generic non-baseline tests.
tests = []
if opts.keyset_is_mp:
tests += [
"ensure_not_ASAN",
"ensure_not_tainted_license",
"ensure_update_verification",
]
for test in tests:
exec_test(test, opts.input, [])
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
scripts/image_signing: Add general security test script
This commit adds a general security test script. This allows
some logic to be moved out of the signer as well as providing a single
entry point for the security tests run by the signer.
BRANCH=none
BUG=b:202397678
TEST=Verified that correct security tests ran with/without
`--keyset-is-mp`
Change-Id: Ib4c779a90d2fe9160c278f20d7ec61242f1d68cc
Signed-off-by: Robert Zieba <33e0ad3807fbfa18c105b3fe38a4090f81988052@google.com>
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/vboot_reference/+/3820999
Reviewed-by: Mike Frysinger <8f3f75c74bd5184edcfa6534cab3c13a00a2f794@chromium.org>
Commit-Queue: Mike Frysinger <8f3f75c74bd5184edcfa6534cab3c13a00a2f794@chromium.org>#!/usr/bin/env python3
# Copyright 2022 The ChromiumOS Authors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run security tests on an artifact"""
import argparse
import os
from pathlib import Path
import subprocess
import sys
DIR = Path(__file__).resolve().parent
def exec_test(name, input, args):
"""Runs a given script
Args:
name: the name of the script to execute
input: the input artifact
args: list of additional arguments for the script
"""
# Ensure this script can execute from any directory
cmd_path = DIR / f"{name}.sh"
cmd = [cmd_path, input] + args
ret = subprocess.run(cmd, check=False)
if ret.returncode:
sys.exit(ret.returncode)
def get_parser():
"""Creates an argument parser"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--config",
"-c",
help="Security test baseline config directory",
required=True,
type=Path,
)
parser.add_argument(
"--input",
"-i",
help="Artfact to test",
required=True,
type=Path,
)
parser.add_argument(
"--keyset-is-mp",
action="store_true",
help="Target artifact is signed with a mass production keyset",
default=False,
)
return parser
def main(argv):
"""Main function, parses arguments and invokes the relevant scripts"""
parser = get_parser()
opts = parser.parse_args(argv)
# Run generic baseline tests.
baseline_tests = [
"ensure_sane_lsb-release",
]
if opts.keyset_is_mp:
baseline_tests += [
"ensure_no_nonrelease_files",
"ensure_secure_kernelparams",
]
for test in baseline_tests:
exec_test(
test, opts.input, [os.path.join(opts.config, f"{test}.config")]
)
# Run generic non-baseline tests.
tests = []
if opts.keyset_is_mp:
tests += [
"ensure_not_ASAN",
"ensure_not_tainted_license",
"ensure_update_verification",
]
for test in tests:
exec_test(test, opts.input, [])
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
<commit_before><commit_msg>scripts/image_signing: Add general security test script
This commit adds a general security test script. This allows
some logic to be moved out of the signer as well as providing a single
entry point for the security tests run by the signer.
BRANCH=none
BUG=b:202397678
TEST=Verified that correct security tests ran with/without
`--keyset-is-mp`
Change-Id: Ib4c779a90d2fe9160c278f20d7ec61242f1d68cc
Signed-off-by: Robert Zieba <33e0ad3807fbfa18c105b3fe38a4090f81988052@google.com>
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/platform/vboot_reference/+/3820999
Reviewed-by: Mike Frysinger <8f3f75c74bd5184edcfa6534cab3c13a00a2f794@chromium.org>
Commit-Queue: Mike Frysinger <8f3f75c74bd5184edcfa6534cab3c13a00a2f794@chromium.org><commit_after>#!/usr/bin/env python3
# Copyright 2022 The ChromiumOS Authors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Run security tests on an artifact"""
import argparse
import os
from pathlib import Path
import subprocess
import sys
DIR = Path(__file__).resolve().parent
def exec_test(name, input, args):
"""Runs a given script
Args:
name: the name of the script to execute
input: the input artifact
args: list of additional arguments for the script
"""
# Ensure this script can execute from any directory
cmd_path = DIR / f"{name}.sh"
cmd = [cmd_path, input] + args
ret = subprocess.run(cmd, check=False)
if ret.returncode:
sys.exit(ret.returncode)
def get_parser():
"""Creates an argument parser"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--config",
"-c",
help="Security test baseline config directory",
required=True,
type=Path,
)
parser.add_argument(
"--input",
"-i",
help="Artfact to test",
required=True,
type=Path,
)
parser.add_argument(
"--keyset-is-mp",
action="store_true",
help="Target artifact is signed with a mass production keyset",
default=False,
)
return parser
def main(argv):
"""Main function, parses arguments and invokes the relevant scripts"""
parser = get_parser()
opts = parser.parse_args(argv)
# Run generic baseline tests.
baseline_tests = [
"ensure_sane_lsb-release",
]
if opts.keyset_is_mp:
baseline_tests += [
"ensure_no_nonrelease_files",
"ensure_secure_kernelparams",
]
for test in baseline_tests:
exec_test(
test, opts.input, [os.path.join(opts.config, f"{test}.config")]
)
# Run generic non-baseline tests.
tests = []
if opts.keyset_is_mp:
tests += [
"ensure_not_ASAN",
"ensure_not_tainted_license",
"ensure_update_verification",
]
for test in tests:
exec_test(test, opts.input, [])
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
|
770da0f60ec4f80425334f281fcd0abb590e6b6b
|
test/snv/writers_test.py
|
test/snv/writers_test.py
|
'''
Created on 05/03/2010
@author: peio
'''
import unittest
from franklin.seq.seqs import SeqFeature, SeqWithQuality, Seq
from Bio.SeqFeature import FeatureLocation
from franklin.snv.snv_annotation import SNP, INVARIANT
from tempfile import NamedTemporaryFile
from franklin.snv.writers import VariantCallFormatWriter
class VariantCallFormatWriterTest(unittest.TestCase):
    'VariantCallFormatWriter tests'
def test_basic(self):
seq_str = 'AAA'
alleles = {('A', SNP):{'read_groups': ['hola_illumina'],
'read_names': ['seq16'],
'orientations': [True],
'qualities': [57.0],
'quality': 57.0,
'mapping_qualities': [37]},
('T', INVARIANT):{'read_groups': ['hola_illumina'],
'read_names': ['seq16'],
'orientations': [True],
'qualities': [57.0],
'quality': 57.0,
'mapping_qualities': [37]},
}
snv = SeqFeature(type='snv', location=FeatureLocation(50, 50),
qualifiers={'alleles':alleles,
'filters':{'by_kind':{SNP:True}},
'reference_allele':'T'})
seq = SeqWithQuality(seq=Seq(seq_str), qual=[30, 30, 30],
name='AT1G55265.1', features=[snv])
fhand = NamedTemporaryFile(mode='a')
writer = VariantCallFormatWriter(fhand, 'ref1')
writer.write(seq)
vcf = open(fhand.name).read()
assert 'vks' in vcf
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
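A small follow-on sketch showing how the temporary VCF could be inspected after writer.write(seq). Whether VariantCallFormatWriter buffers output until a flush or close call is an assumption here, so the flush is defensive; the column parsing is plain VCF, not part of franklin.
# Read back the generated VCF and print the core columns of each record line.
fhand.flush()  # defensive; the writer may buffer
with open(fhand.name) as vcf_fh:
    for line in vcf_fh:
        if line.startswith('#'):
            continue  # skip meta-information and header lines
        chrom, pos, snv_id, ref, alt = line.rstrip('\n').split('\t')[:5]
        print(chrom, pos, snv_id, ref, alt)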
|
Add vcf writer preliminary test
|
Add vcf writer preliminary test
|
Python
|
agpl-3.0
|
JoseBlanca/franklin,JoseBlanca/franklin
|
Add vcf writer preliminary test
|
'''
Created on 05/03/2010
@author: peio
'''
import unittest
from franklin.seq.seqs import SeqFeature, SeqWithQuality, Seq
from Bio.SeqFeature import FeatureLocation
from franklin.snv.snv_annotation import SNP, INVARIANT
from tempfile import NamedTemporaryFile
from franklin.snv.writers import VariantCallFormatWriter
class VariantCallFormatWriterTest(unittest.TestCase):
    'VariantCallFormatWriter tests'
def test_basic(self):
seq_str = 'AAA'
alleles = {('A', SNP):{'read_groups': ['hola_illumina'],
'read_names': ['seq16'],
'orientations': [True],
'qualities': [57.0],
'quality': 57.0,
'mapping_qualities': [37]},
('T', INVARIANT):{'read_groups': ['hola_illumina'],
'read_names': ['seq16'],
'orientations': [True],
'qualities': [57.0],
'quality': 57.0,
'mapping_qualities': [37]},
}
snv = SeqFeature(type='snv', location=FeatureLocation(50, 50),
qualifiers={'alleles':alleles,
'filters':{'by_kind':{SNP:True}},
'reference_allele':'T'})
seq = SeqWithQuality(seq=Seq(seq_str), qual=[30, 30, 30],
name='AT1G55265.1', features=[snv])
fhand = NamedTemporaryFile(mode='a')
writer = VariantCallFormatWriter(fhand, 'ref1')
writer.write(seq)
vcf = open(fhand.name).read()
assert 'vks' in vcf
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add vcf writer preliminary test<commit_after>
|
'''
Created on 05/03/2010
@author: peio
'''
import unittest
from franklin.seq.seqs import SeqFeature, SeqWithQuality, Seq
from Bio.SeqFeature import FeatureLocation
from franklin.snv.snv_annotation import SNP, INVARIANT
from tempfile import NamedTemporaryFile
from franklin.snv.writers import VariantCallFormatWriter
class VariantCallFormatWriterTest(unittest.TestCase):
    'VariantCallFormatWriter tests'
def test_basic(self):
seq_str = 'AAA'
alleles = {('A', SNP):{'read_groups': ['hola_illumina'],
'read_names': ['seq16'],
'orientations': [True],
'qualities': [57.0],
'quality': 57.0,
'mapping_qualities': [37]},
('T', INVARIANT):{'read_groups': ['hola_illumina'],
'read_names': ['seq16'],
'orientations': [True],
'qualities': [57.0],
'quality': 57.0,
'mapping_qualities': [37]},
}
snv = SeqFeature(type='snv', location=FeatureLocation(50, 50),
qualifiers={'alleles':alleles,
'filters':{'by_kind':{SNP:True}},
'reference_allele':'T'})
seq = SeqWithQuality(seq=Seq(seq_str), qual=[30, 30, 30],
name='AT1G55265.1', features=[snv])
fhand = NamedTemporaryFile(mode='a')
writer = VariantCallFormatWriter(fhand, 'ref1')
writer.write(seq)
vcf = open(fhand.name).read()
assert 'vks' in vcf
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add vcf writer preliminary test'''
Created on 05/03/2010
@author: peio
'''
import unittest
from franklin.seq.seqs import SeqFeature, SeqWithQuality, Seq
from Bio.SeqFeature import FeatureLocation
from franklin.snv.snv_annotation import SNP, INVARIANT
from tempfile import NamedTemporaryFile
from franklin.snv.writers import VariantCallFormatWriter
class VariantCallFormatWriterTest(unittest.TestCase):
    'VariantCallFormatWriter tests'
def test_basic(self):
seq_str = 'AAA'
alleles = {('A', SNP):{'read_groups': ['hola_illumina'],
'read_names': ['seq16'],
'orientations': [True],
'qualities': [57.0],
'quality': 57.0,
'mapping_qualities': [37]},
('T', INVARIANT):{'read_groups': ['hola_illumina'],
'read_names': ['seq16'],
'orientations': [True],
'qualities': [57.0],
'quality': 57.0,
'mapping_qualities': [37]},
}
snv = SeqFeature(type='snv', location=FeatureLocation(50, 50),
qualifiers={'alleles':alleles,
'filters':{'by_kind':{SNP:True}},
'reference_allele':'T'})
seq = SeqWithQuality(seq=Seq(seq_str), qual=[30, 30, 30],
name='AT1G55265.1', features=[snv])
fhand = NamedTemporaryFile(mode='a')
writer = VariantCallFormatWriter(fhand, 'ref1')
writer.write(seq)
vcf = open(fhand.name).read()
assert 'vks' in vcf
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add vcf writer preliminary test<commit_after>'''
Created on 05/03/2010
@author: peio
'''
import unittest
from franklin.seq.seqs import SeqFeature, SeqWithQuality, Seq
from Bio.SeqFeature import FeatureLocation
from franklin.snv.snv_annotation import SNP, INVARIANT
from tempfile import NamedTemporaryFile
from franklin.snv.writers import VariantCallFormatWriter
class VariantCallFormatWriterTest(unittest.TestCase):
    'VariantCallFormatWriter tests'
def test_basic(self):
seq_str = 'AAA'
alleles = {('A', SNP):{'read_groups': ['hola_illumina'],
'read_names': ['seq16'],
'orientations': [True],
'qualities': [57.0],
'quality': 57.0,
'mapping_qualities': [37]},
('T', INVARIANT):{'read_groups': ['hola_illumina'],
'read_names': ['seq16'],
'orientations': [True],
'qualities': [57.0],
'quality': 57.0,
'mapping_qualities': [37]},
}
snv = SeqFeature(type='snv', location=FeatureLocation(50, 50),
qualifiers={'alleles':alleles,
'filters':{'by_kind':{SNP:True}},
'reference_allele':'T'})
seq = SeqWithQuality(seq=Seq(seq_str), qual=[30, 30, 30],
name='AT1G55265.1', features=[snv])
fhand = NamedTemporaryFile(mode='a')
writer = VariantCallFormatWriter(fhand, 'ref1')
writer.write(seq)
vcf = open(fhand.name).read()
assert 'vks' in vcf
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
|
2227619b0358db4f2d5f707248ab2fb19652021e
|
test/test_thin_client.py
|
test/test_thin_client.py
|
from unittest import TestCase
from pythinclient.client import *
from pythinclient.server import *
import time
__author__ = 'Alek Ratzloff <alekratz@gmail.com>'
DATEFMT = "%d/%m/%Y"
def today():
return time.strftime(DATEFMT)
def server_echo(msg, conn, addr):
# Echoes the message
conn.send((msg + '\n').encode('ascii'))
def server_date(msg, conn, addr):
# Sends the current date
date = today()
conn.send((date + '\n').encode('ascii'))
class TestThinClient(TestCase):
def __init__(self, method_name):
super(TestThinClient, self).__init__(method_name)
# This is the server that we connect to
self.server = BasicThinServer()
# This is the client that connects to the server
self.client = BasicThinClient()
# Add some basic hooks for the server to handle
self.server.add_hook("echo", server_echo)
self.server.add_hook("date", server_date)
self.server.start()
def test_client_connect(self):
self.client.connect()
self.assertTrue(self.client.sock is not None)
self.client.close()
self.assertTrue(self.client.sock is None)
def test_client_send_receive(self):
now = today()
# Basic send and receive
self.client.connect()
self.client.send("echo test")
response = self.client.wait_receive()
self.assertEqual(response, "echo test")
self.client.close()
self.client.connect()
self.client.send("date")
response = self.client.wait_receive()
self.assertEqual(response, now)
self.client.close()
# One line send and receive
response = self.client.send_receive("echo test")
self.assertEqual(response, "echo test")
response = self.client.send_receive("date")
self.assertEqual(response, now)
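Given the commit message's note that these tests hang, one likely culprit is BasicThinServer.start() blocking the test process. A hedged sketch of pushing the server onto a daemon thread follows; whether start() blocks and whether the server exposes any shutdown call are assumptions about the pythinclient API.
# Sketch: keep the server off the main thread so client calls can proceed.
import threading
def _serve(server):
    server.start()  # assumed to block while accepting connections
server = BasicThinServer()
server.add_hook("echo", server_echo)
server.add_hook("date", server_date)
server_thread = threading.Thread(target=_serve, args=(server,), daemon=True)
server_thread.start()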
|
Add thin client tests. They don't work yet - they just kind of hang there.
|
Add thin client tests. They don't work yet - they just kind of hang there.
|
Python
|
bsd-3-clause
|
alekratz/pythinclient
|
Add thin client tests. They don't work yet - they just kind of hang there.
|
from unittest import TestCase
from pythinclient.client import *
from pythinclient.server import *
import time
__author__ = 'Alek Ratzloff <alekratz@gmail.com>'
DATEFMT = "%d/%m/%Y"
def today():
return time.strftime(DATEFMT)
def server_echo(msg, conn, addr):
# Echoes the message
conn.send((msg + '\n').encode('ascii'))
def server_date(msg, conn, addr):
# Sends the current date
date = today()
conn.send((date + '\n').encode('ascii'))
class TestThinClient(TestCase):
def __init__(self, method_name):
super(TestThinClient, self).__init__(method_name)
# This is the server that we connect to
self.server = BasicThinServer()
# This is the client that connects to the server
self.client = BasicThinClient()
# Add some basic hooks for the server to handle
self.server.add_hook("echo", server_echo)
self.server.add_hook("date", server_date)
self.server.start()
def test_client_connect(self):
self.client.connect()
self.assertTrue(self.client.sock is not None)
self.client.close()
self.assertTrue(self.client.sock is None)
def test_client_send_receive(self):
now = today()
# Basic send and receive
self.client.connect()
self.client.send("echo test")
response = self.client.wait_receive()
self.assertEqual(response, "echo test")
self.client.close()
self.client.connect()
self.client.send("date")
response = self.client.wait_receive()
self.assertEqual(response, now)
self.client.close()
# One line send and receive
response = self.client.send_receive("echo test")
self.assertEqual(response, "echo test")
response = self.client.send_receive("date")
self.assertEqual(response, now)
|
<commit_before><commit_msg>Add thin client tests. They don't work yet - they just kind of hang there.<commit_after>
|
from unittest import TestCase
from pythinclient.client import *
from pythinclient.server import *
import time
__author__ = 'Alek Ratzloff <alekratz@gmail.com>'
DATEFMT = "%d/%m/%Y"
def today():
return time.strftime(DATEFMT)
def server_echo(msg, conn, addr):
# Echoes the message
conn.send((msg + '\n').encode('ascii'))
def server_date(msg, conn, addr):
# Sends the current date
date = today()
conn.send((date + '\n').encode('ascii'))
class TestThinClient(TestCase):
def __init__(self, method_name):
super(TestThinClient, self).__init__(method_name)
# This is the server that we connect to
self.server = BasicThinServer()
# This is the client that connects to the server
self.client = BasicThinClient()
# Add some basic hooks for the server to handle
self.server.add_hook("echo", server_echo)
self.server.add_hook("date", server_date)
self.server.start()
def test_client_connect(self):
self.client.connect()
self.assertTrue(self.client.sock is not None)
self.client.close()
self.assertTrue(self.client.sock is None)
def test_client_send_receive(self):
now = today()
# Basic send and receive
self.client.connect()
self.client.send("echo test")
response = self.client.wait_receive()
self.assertEqual(response, "echo test")
self.client.close()
self.client.connect()
self.client.send("date")
response = self.client.wait_receive()
self.assertEqual(response, now)
self.client.close()
# One line send and receive
response = self.client.send_receive("echo test")
self.assertEqual(response, "echo test")
response = self.client.send_receive("date")
self.assertEqual(response, now)
|
Add thin client tests. They don't work yet - they just kind of hang there.from unittest import TestCase
from pythinclient.client import *
from pythinclient.server import *
import time
__author__ = 'Alek Ratzloff <alekratz@gmail.com>'
DATEFMT = "%d/%m/%Y"
def today():
return time.strftime(DATEFMT)
def server_echo(msg, conn, addr):
# Echoes the message
conn.send((msg + '\n').encode('ascii'))
def server_date(msg, conn, addr):
# Sends the current date
date = today()
conn.send((date + '\n').encode('ascii'))
class TestThinClient(TestCase):
def __init__(self, method_name):
super(TestThinClient, self).__init__(method_name)
# This is the server that we connect to
self.server = BasicThinServer()
# This is the client that connects to the server
self.client = BasicThinClient()
# Add some basic hooks for the server to handle
self.server.add_hook("echo", server_echo)
self.server.add_hook("date", server_date)
self.server.start()
def test_client_connect(self):
self.client.connect()
self.assertTrue(self.client.sock is not None)
self.client.close()
self.assertTrue(self.client.sock is None)
def test_client_send_receive(self):
now = today()
# Basic send and receive
self.client.connect()
self.client.send("echo test")
response = self.client.wait_receive()
self.assertEqual(response, "echo test")
self.client.close()
self.client.connect()
self.client.send("date")
response = self.client.wait_receive()
self.assertEqual(response, now)
self.client.close()
# One line send and receive
response = self.client.send_receive("echo test")
self.assertEqual(response, "echo test")
response = self.client.send_receive("date")
self.assertEqual(response, now)
|
<commit_before><commit_msg>Add thin client tests. They don't work yet - they just kind of hang there.<commit_after>from unittest import TestCase
from pythinclient.client import *
from pythinclient.server import *
import time
__author__ = 'Alek Ratzloff <alekratz@gmail.com>'
DATEFMT = "%d/%m/%Y"
def today():
return time.strftime(DATEFMT)
def server_echo(msg, conn, addr):
# Echoes the message
conn.send((msg + '\n').encode('ascii'))
def server_date(msg, conn, addr):
# Sends the current date
date = today()
conn.send((date + '\n').encode('ascii'))
class TestThinClient(TestCase):
def __init__(self, method_name):
super(TestThinClient, self).__init__(method_name)
# This is the server that we connect to
self.server = BasicThinServer()
# This is the client that connects to the server
self.client = BasicThinClient()
# Add some basic hooks for the server to handle
self.server.add_hook("echo", server_echo)
self.server.add_hook("date", server_date)
self.server.start()
def test_client_connect(self):
self.client.connect()
self.assertTrue(self.client.sock is not None)
self.client.close()
self.assertTrue(self.client.sock is None)
def test_client_send_receive(self):
now = today()
# Basic send and receive
self.client.connect()
self.client.send("echo test")
response = self.client.wait_receive()
self.assertEqual(response, "echo test")
self.client.close()
self.client.connect()
self.client.send("date")
response = self.client.wait_receive()
self.assertEqual(response, now)
self.client.close()
# One line send and receive
response = self.client.send_receive("echo test")
self.assertEqual(response, "echo test")
response = self.client.send_receive("date")
self.assertEqual(response, now)
|
|
80e7b007bdb731fb8f5d069230a3c5be09a0ec50
|
mz-data.py
|
mz-data.py
|
# IPython log file
import numpy as np
import napari
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
# load the data
cube = np.load('datacube.npy')
peaks = np.load('peaklist.npy')
mz = peaks[0]
thresh = np.load('hsr_thresholds.npy')
cubet = np.transpose(cube, (2, 0, 1))
cubet_norm = cubet / thresh[:, np.newaxis, np.newaxis]
# create the viewer
viewer = napari.view_image(cubet_norm)
# create the intensity plot
with plt.style.context('dark_background'):
mz_canvas = FigureCanvas(Figure(figsize=(5, 3)))
mz_axes = mz_canvas.figure.subplots()
intensities = cube[0, 0, :]
intensity_line = mz_axes.plot(mz, intensities)[0] # returns line list
position_line = mz_axes.axvline(x=mz[0], c='C1')
position_line.set_zorder(-1) # keep the spectra in front
minval, maxval = np.min(cube), np.max(cube)
range_ = maxval - minval
centre = (maxval + minval) / 2
min_y = centre - 1.05 * range_ / 2
max_y = centre + 1.05 * range_ / 2
mz_axes.set_ylim(min_y, max_y)
mz_axes.set_xlabel('m/z')
mz_axes.set_ylabel('intensity')
title = mz_axes.set_title('(0, 0)')
mz_canvas.figure.tight_layout()
# add the plot to the viewer
viewer.window.add_dock_widget(mz_canvas)
# create a function to update the plot
def update_plot(axis_event):
axis = axis_event.axis
if axis != 0:
return
slice_num = axis_event.value
x = mz[slice_num]
position_line.set_data([x, x], [0, 1])
mz_canvas.draw_idle()
# connect the function to the dims axis
viewer.dims.events.axis.connect(update_plot)
# grab the image layer
layer = viewer.layers[0]
# add a click callback to the layer to update the spectrum being viewed
@layer.mouse_drag_callbacks.append
def update_intensity(layer, event):
xs, ys = intensity_line.get_data()
coords_full = tuple(np.round(layer.coordinates).astype(int))
if all(coords_full[i] in range(cubet.shape[i]) for i in range(cubet.ndim)):
coords = coords_full[1:] # rows, columns
new_ys = cube[coords]
intensity_line.set_data(xs, new_ys)
title.set_text(str(coords))
mz_canvas.draw_idle()
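The per-peak normalisation above relies on NumPy broadcasting against a per-channel threshold; a tiny self-contained illustration with made-up shapes:
# Toy check of the threshold normalisation used for cubet_norm.
import numpy as np
toy_cube = np.random.rand(4, 3, 3)            # 4 m/z channels, 3x3 pixels
toy_thresh = np.array([0.5, 1.0, 2.0, 4.0])   # one threshold per channel
toy_norm = toy_cube / toy_thresh[:, np.newaxis, np.newaxis]
assert toy_norm.shape == toy_cube.shape
assert np.allclose(toy_norm[2], toy_cube[2] / 2.0)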
|
Add script to view mass spec images
|
Add script to view mass spec images
|
Python
|
bsd-3-clause
|
jni/useful-histories
|
Add script to view mass spec images
|
# IPython log file
import numpy as np
import napari
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
# load the data
cube = np.load('datacube.npy')
peaks = np.load('peaklist.npy')
mz = peaks[0]
thresh = np.load('hsr_thresholds.npy')
cubet = np.transpose(cube, (2, 0, 1))
cubet_norm = cubet / thresh[:, np.newaxis, np.newaxis]
# create the viewer
viewer = napari.view_image(cubet_norm)
# create the intensity plot
with plt.style.context('dark_background'):
mz_canvas = FigureCanvas(Figure(figsize=(5, 3)))
mz_axes = mz_canvas.figure.subplots()
intensities = cube[0, 0, :]
intensity_line = mz_axes.plot(mz, intensities)[0] # returns line list
position_line = mz_axes.axvline(x=mz[0], c='C1')
position_line.set_zorder(-1) # keep the spectra in front
minval, maxval = np.min(cube), np.max(cube)
range_ = maxval - minval
centre = (maxval + minval) / 2
min_y = centre - 1.05 * range_ / 2
max_y = centre + 1.05 * range_ / 2
mz_axes.set_ylim(min_y, max_y)
mz_axes.set_xlabel('m/z')
mz_axes.set_ylabel('intensity')
title = mz_axes.set_title('(0, 0)')
mz_canvas.figure.tight_layout()
# add the plot to the viewer
viewer.window.add_dock_widget(mz_canvas)
# create a function to update the plot
def update_plot(axis_event):
axis = axis_event.axis
if axis != 0:
return
slice_num = axis_event.value
x = mz[slice_num]
position_line.set_data([x, x], [0, 1])
mz_canvas.draw_idle()
# connect the function to the dims axis
viewer.dims.events.axis.connect(update_plot)
# grab the image layer
layer = viewer.layers[0]
# add a click callback to the layer to update the spectrum being viewed
@layer.mouse_drag_callbacks.append
def update_intensity(layer, event):
xs, ys = intensity_line.get_data()
coords_full = tuple(np.round(layer.coordinates).astype(int))
if all(coords_full[i] in range(cubet.shape[i]) for i in range(cubet.ndim)):
coords = coords_full[1:] # rows, columns
new_ys = cube[coords]
intensity_line.set_data(xs, new_ys)
title.set_text(str(coords))
mz_canvas.draw_idle()
|
<commit_before><commit_msg>Add script to view mass spec images<commit_after>
|
# IPython log file
import numpy as np
import napari
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
# load the data
cube = np.load('datacube.npy')
peaks = np.load('peaklist.npy')
mz = peaks[0]
thresh = np.load('hsr_thresholds.npy')
cubet = np.transpose(cube, (2, 0, 1))
cubet_norm = cubet / thresh[:, np.newaxis, np.newaxis]
# create the viewer
viewer = napari.view_image(cubet_norm)
# create the intensity plot
with plt.style.context('dark_background'):
mz_canvas = FigureCanvas(Figure(figsize=(5, 3)))
mz_axes = mz_canvas.figure.subplots()
intensities = cube[0, 0, :]
intensity_line = mz_axes.plot(mz, intensities)[0] # returns line list
position_line = mz_axes.axvline(x=mz[0], c='C1')
position_line.set_zorder(-1) # keep the spectra in front
minval, maxval = np.min(cube), np.max(cube)
range_ = maxval - minval
centre = (maxval + minval) / 2
min_y = centre - 1.05 * range_ / 2
max_y = centre + 1.05 * range_ / 2
mz_axes.set_ylim(min_y, max_y)
mz_axes.set_xlabel('m/z')
mz_axes.set_ylabel('intensity')
title = mz_axes.set_title('(0, 0)')
mz_canvas.figure.tight_layout()
# add the plot to the viewer
viewer.window.add_dock_widget(mz_canvas)
# create a function to update the plot
def update_plot(axis_event):
axis = axis_event.axis
if axis != 0:
return
slice_num = axis_event.value
x = mz[slice_num]
position_line.set_data([x, x], [0, 1])
mz_canvas.draw_idle()
# connect the function to the dims axis
viewer.dims.events.axis.connect(update_plot)
# grab the image layer
layer = viewer.layers[0]
# add a click callback to the layer to update the spectrum being viewed
@layer.mouse_drag_callbacks.append
def update_intensity(layer, event):
xs, ys = intensity_line.get_data()
coords_full = tuple(np.round(layer.coordinates).astype(int))
if all(coords_full[i] in range(cubet.shape[i]) for i in range(cubet.ndim)):
coords = coords_full[1:] # rows, columns
new_ys = cube[coords]
intensity_line.set_data(xs, new_ys)
title.set_text(str(coords))
mz_canvas.draw_idle()
|
Add script to view mass spec images# IPython log file
import numpy as np
import napari
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
# load the data
cube = np.load('datacube.npy')
peaks = np.load('peaklist.npy')
mz = peaks[0]
thresh = np.load('hsr_thresholds.npy')
cubet = np.transpose(cube, (2, 0, 1))
cubet_norm = cubet / thresh[:, np.newaxis, np.newaxis]
# create the viewer
viewer = napari.view_image(cubet_norm)
# create the intensity plot
with plt.style.context('dark_background'):
mz_canvas = FigureCanvas(Figure(figsize=(5, 3)))
mz_axes = mz_canvas.figure.subplots()
intensities = cube[0, 0, :]
intensity_line = mz_axes.plot(mz, intensities)[0] # returns line list
position_line = mz_axes.axvline(x=mz[0], c='C1')
position_line.set_zorder(-1) # keep the spectra in front
minval, maxval = np.min(cube), np.max(cube)
range_ = maxval - minval
centre = (maxval + minval) / 2
min_y = centre - 1.05 * range_ / 2
max_y = centre + 1.05 * range_ / 2
mz_axes.set_ylim(min_y, max_y)
mz_axes.set_xlabel('m/z')
mz_axes.set_ylabel('intensity')
title = mz_axes.set_title('(0, 0)')
mz_canvas.figure.tight_layout()
# add the plot to the viewer
viewer.window.add_dock_widget(mz_canvas)
# create a function to update the plot
def update_plot(axis_event):
axis = axis_event.axis
if axis != 0:
return
slice_num = axis_event.value
x = mz[slice_num]
position_line.set_data([x, x], [0, 1])
mz_canvas.draw_idle()
# connect the function to the dims axis
viewer.dims.events.axis.connect(update_plot)
# grab the image layer
layer = viewer.layers[0]
# add a click callback to the layer to update the spectrum being viewed
@layer.mouse_drag_callbacks.append
def update_intensity(layer, event):
xs, ys = intensity_line.get_data()
coords_full = tuple(np.round(layer.coordinates).astype(int))
if all(coords_full[i] in range(cubet.shape[i]) for i in range(cubet.ndim)):
coords = coords_full[1:] # rows, columns
new_ys = cube[coords]
intensity_line.set_data(xs, new_ys)
title.set_text(str(coords))
mz_canvas.draw_idle()
|
<commit_before><commit_msg>Add script to view mass spec images<commit_after># IPython log file
import numpy as np
import napari
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
# load the data
cube = np.load('datacube.npy')
peaks = np.load('peaklist.npy')
mz = peaks[0]
thresh = np.load('hsr_thresholds.npy')
cubet = np.transpose(cube, (2, 0, 1))
cubet_norm = cubet / thresh[:, np.newaxis, np.newaxis]
# create the viewer
viewer = napari.view_image(cubet_norm)
# create the intensity plot
with plt.style.context('dark_background'):
mz_canvas = FigureCanvas(Figure(figsize=(5, 3)))
mz_axes = mz_canvas.figure.subplots()
intensities = cube[0, 0, :]
intensity_line = mz_axes.plot(mz, intensities)[0] # returns line list
position_line = mz_axes.axvline(x=mz[0], c='C1')
position_line.set_zorder(-1) # keep the spectra in front
minval, maxval = np.min(cube), np.max(cube)
range_ = maxval - minval
centre = (maxval + minval) / 2
min_y = centre - 1.05 * range_ / 2
max_y = centre + 1.05 * range_ / 2
mz_axes.set_ylim(min_y, max_y)
mz_axes.set_xlabel('m/z')
mz_axes.set_ylabel('intensity')
title = mz_axes.set_title('(0, 0)')
mz_canvas.figure.tight_layout()
# add the plot to the viewer
viewer.window.add_dock_widget(mz_canvas)
# create a function to update the plot
def update_plot(axis_event):
axis = axis_event.axis
if axis != 0:
return
slice_num = axis_event.value
x = mz[slice_num]
position_line.set_data([x, x], [0, 1])
mz_canvas.draw_idle()
# connect the function to the dims axis
viewer.dims.events.axis.connect(update_plot)
# grab the image layer
layer = viewer.layers[0]
# add a click callback to the layer to update the spectrum being viewed
@layer.mouse_drag_callbacks.append
def update_intensity(layer, event):
xs, ys = intensity_line.get_data()
coords_full = tuple(np.round(layer.coordinates).astype(int))
if all(coords_full[i] in range(cubet.shape[i]) for i in range(cubet.ndim)):
coords = coords_full[1:] # rows, columns
new_ys = cube[coords]
intensity_line.set_data(xs, new_ys)
title.set_text(str(coords))
mz_canvas.draw_idle()
|
|
54c7d5704a148b703fb0db74eeeb66c187134faf
|
gratipay/utils/pricing.py
|
gratipay/utils/pricing.py
|
from decimal import Decimal as D, ROUND_HALF_EVEN
def suggested_payment(usage):
if usage >= 500:
percentage = D('0.02')
elif usage >= 20:
percentage = D('0.05')
else:
percentage = D('0.10')
suggestion = usage * percentage
if suggestion == 0:
rounded = suggestion
elif suggestion < 0.25:
rounded = D('0.25')
elif suggestion < 0.50:
rounded = D('0.50')
elif suggestion < 1:
rounded = D('1.00')
else:
rounded = suggestion.quantize(D('0'), ROUND_HALF_EVEN)
return rounded
def suggested_payment_low_high(usage):
# Above $500/wk we suggest 2%.
if usage >= 5000:
low = D('100.00')
high = D('1000.00')
elif usage >= 500:
low = D('10.00')
high = D('100.00')
# From $20 to $499 we suggest 5%.
elif usage >= 100:
low = D('5.00')
high = D('25.00')
elif usage >= 20:
low = D('1.00')
high = D('5.00')
# Below $20 we suggest 10%.
elif usage >= 5:
low = D('0.50')
high = D('2.00')
else:
low = D('0.10')
high = D('1.00')
return low, high
|
from decimal import Decimal as D, ROUND_HALF_EVEN
def suggested_payment(usage):
percentage = D('0.05')
suggestion = usage * percentage
rounded = suggestion.quantize(D('0'), ROUND_HALF_EVEN)
return rounded
def suggested_payment_low_high(usage):
# Above $500/wk we suggest 2%.
if usage >= 5000:
low = D('100.00')
high = D('1000.00')
elif usage >= 500:
low = D('10.00')
high = D('100.00')
# From $20 to $499 we suggest 5%.
elif usage >= 100:
low = D('5.00')
high = D('25.00')
elif usage >= 20:
low = D('1.00')
high = D('5.00')
# Below $20 we suggest 10%.
elif usage >= 5:
low = D('0.50')
high = D('2.00')
else:
low = D('0.10')
high = D('1.00')
return low, high
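A quick behaviour sketch of the new flat 5% suggestion; the import path comes from the file path of this change, and the expected values follow from ROUND_HALF_EVEN sending exact halves to the even neighbour.
# Assumes the gratipay package is importable in the current environment.
from decimal import Decimal as D
from gratipay.utils.pricing import suggested_payment
for usage in (D('10'), D('30'), D('250'), D('1000')):
    print(usage, suggested_payment(usage))  # -> 0, 2, 12, 50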
|
Change suggested payment to flat 5%
|
Change suggested payment to flat 5%
|
Python
|
mit
|
eXcomm/gratipay.com,eXcomm/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com,studio666/gratipay.com,eXcomm/gratipay.com,studio666/gratipay.com,studio666/gratipay.com,gratipay/gratipay.com,eXcomm/gratipay.com,studio666/gratipay.com
|
from decimal import Decimal as D, ROUND_HALF_EVEN
def suggested_payment(usage):
if usage >= 500:
percentage = D('0.02')
elif usage >= 20:
percentage = D('0.05')
else:
percentage = D('0.10')
suggestion = usage * percentage
if suggestion == 0:
rounded = suggestion
elif suggestion < 0.25:
rounded = D('0.25')
elif suggestion < 0.50:
rounded = D('0.50')
elif suggestion < 1:
rounded = D('1.00')
else:
rounded = suggestion.quantize(D('0'), ROUND_HALF_EVEN)
return rounded
def suggested_payment_low_high(usage):
# Above $500/wk we suggest 2%.
if usage >= 5000:
low = D('100.00')
high = D('1000.00')
elif usage >= 500:
low = D('10.00')
high = D('100.00')
# From $20 to $499 we suggest 5%.
elif usage >= 100:
low = D('5.00')
high = D('25.00')
elif usage >= 20:
low = D('1.00')
high = D('5.00')
# Below $20 we suggest 10%.
elif usage >= 5:
low = D('0.50')
high = D('2.00')
else:
low = D('0.10')
high = D('1.00')
return low, high
Change suggested payment to flat 5%
|
from decimal import Decimal as D, ROUND_HALF_EVEN
def suggested_payment(usage):
percentage = D('0.05')
suggestion = usage * percentage
rounded = suggestion.quantize(D('0'), ROUND_HALF_EVEN)
return rounded
def suggested_payment_low_high(usage):
# Above $500/wk we suggest 2%.
if usage >= 5000:
low = D('100.00')
high = D('1000.00')
elif usage >= 500:
low = D('10.00')
high = D('100.00')
# From $20 to $499 we suggest 5%.
elif usage >= 100:
low = D('5.00')
high = D('25.00')
elif usage >= 20:
low = D('1.00')
high = D('5.00')
# Below $20 we suggest 10%.
elif usage >= 5:
low = D('0.50')
high = D('2.00')
else:
low = D('0.10')
high = D('1.00')
return low, high
|
<commit_before>from decimal import Decimal as D, ROUND_HALF_EVEN
def suggested_payment(usage):
if usage >= 500:
percentage = D('0.02')
elif usage >= 20:
percentage = D('0.05')
else:
percentage = D('0.10')
suggestion = usage * percentage
if suggestion == 0:
rounded = suggestion
elif suggestion < 0.25:
rounded = D('0.25')
elif suggestion < 0.50:
rounded = D('0.50')
elif suggestion < 1:
rounded = D('1.00')
else:
rounded = suggestion.quantize(D('0'), ROUND_HALF_EVEN)
return rounded
def suggested_payment_low_high(usage):
# Above $500/wk we suggest 2%.
if usage >= 5000:
low = D('100.00')
high = D('1000.00')
elif usage >= 500:
low = D('10.00')
high = D('100.00')
# From $20 to $499 we suggest 5%.
elif usage >= 100:
low = D('5.00')
high = D('25.00')
elif usage >= 20:
low = D('1.00')
high = D('5.00')
# Below $20 we suggest 10%.
elif usage >= 5:
low = D('0.50')
high = D('2.00')
else:
low = D('0.10')
high = D('1.00')
return low, high
<commit_msg>Change suggested payment to flat 5%<commit_after>
|
from decimal import Decimal as D, ROUND_HALF_EVEN
def suggested_payment(usage):
percentage = D('0.05')
suggestion = usage * percentage
rounded = suggestion.quantize(D('0'), ROUND_HALF_EVEN)
return rounded
def suggested_payment_low_high(usage):
# Above $500/wk we suggest 2%.
if usage >= 5000:
low = D('100.00')
high = D('1000.00')
elif usage >= 500:
low = D('10.00')
high = D('100.00')
# From $20 to $499 we suggest 5%.
elif usage >= 100:
low = D('5.00')
high = D('25.00')
elif usage >= 20:
low = D('1.00')
high = D('5.00')
# Below $20 we suggest 10%.
elif usage >= 5:
low = D('0.50')
high = D('2.00')
else:
low = D('0.10')
high = D('1.00')
return low, high
|
from decimal import Decimal as D, ROUND_HALF_EVEN
def suggested_payment(usage):
if usage >= 500:
percentage = D('0.02')
elif usage >= 20:
percentage = D('0.05')
else:
percentage = D('0.10')
suggestion = usage * percentage
if suggestion == 0:
rounded = suggestion
elif suggestion < 0.25:
rounded = D('0.25')
elif suggestion < 0.50:
rounded = D('0.50')
elif suggestion < 1:
rounded = D('1.00')
else:
rounded = suggestion.quantize(D('0'), ROUND_HALF_EVEN)
return rounded
def suggested_payment_low_high(usage):
# Above $500/wk we suggest 2%.
if usage >= 5000:
low = D('100.00')
high = D('1000.00')
elif usage >= 500:
low = D('10.00')
high = D('100.00')
# From $20 to $499 we suggest 5%.
elif usage >= 100:
low = D('5.00')
high = D('25.00')
elif usage >= 20:
low = D('1.00')
high = D('5.00')
# Below $20 we suggest 10%.
elif usage >= 5:
low = D('0.50')
high = D('2.00')
else:
low = D('0.10')
high = D('1.00')
return low, high
Change suggested payment to flat 5%from decimal import Decimal as D, ROUND_HALF_EVEN
def suggested_payment(usage):
percentage = D('0.05')
suggestion = usage * percentage
rounded = suggestion.quantize(D('0'), ROUND_HALF_EVEN)
return rounded
def suggested_payment_low_high(usage):
# Above $500/wk we suggest 2%.
if usage >= 5000:
low = D('100.00')
high = D('1000.00')
elif usage >= 500:
low = D('10.00')
high = D('100.00')
# From $20 to $499 we suggest 5%.
elif usage >= 100:
low = D('5.00')
high = D('25.00')
elif usage >= 20:
low = D('1.00')
high = D('5.00')
# Below $20 we suggest 10%.
elif usage >= 5:
low = D('0.50')
high = D('2.00')
else:
low = D('0.10')
high = D('1.00')
return low, high
|
<commit_before>from decimal import Decimal as D, ROUND_HALF_EVEN
def suggested_payment(usage):
if usage >= 500:
percentage = D('0.02')
elif usage >= 20:
percentage = D('0.05')
else:
percentage = D('0.10')
suggestion = usage * percentage
if suggestion == 0:
rounded = suggestion
elif suggestion < 0.25:
rounded = D('0.25')
elif suggestion < 0.50:
rounded = D('0.50')
elif suggestion < 1:
rounded = D('1.00')
else:
rounded = suggestion.quantize(D('0'), ROUND_HALF_EVEN)
return rounded
def suggested_payment_low_high(usage):
# Above $500/wk we suggest 2%.
if usage >= 5000:
low = D('100.00')
high = D('1000.00')
elif usage >= 500:
low = D('10.00')
high = D('100.00')
# From $20 to $499 we suggest 5%.
elif usage >= 100:
low = D('5.00')
high = D('25.00')
elif usage >= 20:
low = D('1.00')
high = D('5.00')
# Below $20 we suggest 10%.
elif usage >= 5:
low = D('0.50')
high = D('2.00')
else:
low = D('0.10')
high = D('1.00')
return low, high
<commit_msg>Change suggested payment to flat 5%<commit_after>from decimal import Decimal as D, ROUND_HALF_EVEN
def suggested_payment(usage):
percentage = D('0.05')
suggestion = usage * percentage
rounded = suggestion.quantize(D('0'), ROUND_HALF_EVEN)
return rounded
def suggested_payment_low_high(usage):
# Above $500/wk we suggest 2%.
if usage >= 5000:
low = D('100.00')
high = D('1000.00')
elif usage >= 500:
low = D('10.00')
high = D('100.00')
# From $20 to $499 we suggest 5%.
elif usage >= 100:
low = D('5.00')
high = D('25.00')
elif usage >= 20:
low = D('1.00')
high = D('5.00')
# Below $20 we suggest 10%.
elif usage >= 5:
low = D('0.50')
high = D('2.00')
else:
low = D('0.10')
high = D('1.00')
return low, high
|
b97a5108c26f3e6b4f4a57c9148393e0ba13cfb4
|
tests/test_redis/test_redis_ephemeral_tokens.py
|
tests/test_redis/test_redis_ephemeral_tokens.py
|
import pytest
from libs.redis_db import RedisEphemeralTokens
from tests.utils import BaseTest
@pytest.mark.redis_mark
class TestRedisEphemeralTokens(BaseTest):
def test_objects(self):
token = RedisEphemeralTokens()
assert token.key is not None
assert token.redis_key == RedisEphemeralTokens.KEY_EPHEMERAL_TOKENS.format(token.key)
assert token.get_state() is None
assert token.salt is None
assert token.ttl is None
assert token.scope is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
assert token.get_state() is not None
assert token.salt is not None
assert token.ttl == RedisEphemeralTokens.EXPIRATION_TTL
assert token.scope == token.get_scope('user', 'experiment', 1)
assert token.check_token('foo') is False
# Checking delete the token
assert token.get_state() is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
assert token.check_token(None) is False
# Checking delete the token
assert token.get_state() is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
valid = RedisEphemeralTokens.make_token(token)
assert token.check_token(valid) is True
# Checking delete the token
assert token.get_state() is None
assert token.salt is None
assert token.ttl is None
assert token.scope is None
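A hedged sketch of the issue-and-verify flow these assertions imply. RedisEphemeralTokens is an internal helper, so the call shapes below simply mirror the test: the scope is built from an instance, make_token() produces the value handed to the client, and the stored state is wiped after any check.
# One-shot token flow inferred from the test above.
scope = RedisEphemeralTokens().get_scope('user', 'experiment', 1)
token = RedisEphemeralTokens.generate(scope=scope)
client_value = RedisEphemeralTokens.make_token(token)  # give this to the caller
# Later, when the caller presents the value:
if token.check_token(client_value):
    print('token accepted')  # state is deleted whether the check passes or fails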
|
Add ephemeral token redis tests
|
Add ephemeral token redis tests
|
Python
|
apache-2.0
|
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
|
Add ephemeral token redis tests
|
import pytest
from libs.redis_db import RedisEphemeralTokens
from tests.utils import BaseTest
@pytest.mark.redis_mark
class TestRedisEphemeralTokens(BaseTest):
def test_objects(self):
token = RedisEphemeralTokens()
assert token.key is not None
assert token.redis_key == RedisEphemeralTokens.KEY_EPHEMERAL_TOKENS.format(token.key)
assert token.get_state() is None
assert token.salt is None
assert token.ttl is None
assert token.scope is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
assert token.get_state() is not None
assert token.salt is not None
assert token.ttl == RedisEphemeralTokens.EXPIRATION_TTL
assert token.scope == token.get_scope('user', 'experiment', 1)
assert token.check_token('foo') is False
# Checking delete the token
assert token.get_state() is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
assert token.check_token(None) is False
# Checking delete the token
assert token.get_state() is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
valid = RedisEphemeralTokens.make_token(token)
assert token.check_token(valid) is True
# Checking delete the token
assert token.get_state() is None
assert token.salt is None
assert token.ttl is None
assert token.scope is None
|
<commit_before><commit_msg>Add ephemeral token redis tests<commit_after>
|
import pytest
from libs.redis_db import RedisEphemeralTokens
from tests.utils import BaseTest
@pytest.mark.redis_mark
class TestRedisEphemeralTokens(BaseTest):
def test_objects(self):
token = RedisEphemeralTokens()
assert token.key is not None
assert token.redis_key == RedisEphemeralTokens.KEY_EPHEMERAL_TOKENS.format(token.key)
assert token.get_state() is None
assert token.salt is None
assert token.ttl is None
assert token.scope is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
assert token.get_state() is not None
assert token.salt is not None
assert token.ttl == RedisEphemeralTokens.EXPIRATION_TTL
assert token.scope == token.get_scope('user', 'experiment', 1)
assert token.check_token('foo') is False
# Checking delete the token
assert token.get_state() is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
assert token.check_token(None) is False
# Checking delete the token
assert token.get_state() is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
valid = RedisEphemeralTokens.make_token(token)
assert token.check_token(valid) is True
# Checking delete the token
assert token.get_state() is None
assert token.salt is None
assert token.ttl is None
assert token.scope is None
|
Add ephemeral token redis testsimport pytest
from libs.redis_db import RedisEphemeralTokens
from tests.utils import BaseTest
@pytest.mark.redis_mark
class TestRedisEphemeralTokens(BaseTest):
def test_objects(self):
token = RedisEphemeralTokens()
assert token.key is not None
assert token.redis_key == RedisEphemeralTokens.KEY_EPHEMERAL_TOKENS.format(token.key)
assert token.get_state() is None
assert token.salt is None
assert token.ttl is None
assert token.scope is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
assert token.get_state() is not None
assert token.salt is not None
assert token.ttl == RedisEphemeralTokens.EXPIRATION_TTL
assert token.scope == token.get_scope('user', 'experiment', 1)
assert token.check_token('foo') is False
# Checking delete the token
assert token.get_state() is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
assert token.check_token(None) is False
# Checking delete the token
assert token.get_state() is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
valid = RedisEphemeralTokens.make_token(token)
assert token.check_token(valid) is True
# Checking delete the token
assert token.get_state() is None
assert token.salt is None
assert token.ttl is None
assert token.scope is None
|
<commit_before><commit_msg>Add ephemeral token redis tests<commit_after>import pytest
from libs.redis_db import RedisEphemeralTokens
from tests.utils import BaseTest
@pytest.mark.redis_mark
class TestRedisEphemeralTokens(BaseTest):
def test_objects(self):
token = RedisEphemeralTokens()
assert token.key is not None
assert token.redis_key == RedisEphemeralTokens.KEY_EPHEMERAL_TOKENS.format(token.key)
assert token.get_state() is None
assert token.salt is None
assert token.ttl is None
assert token.scope is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
assert token.get_state() is not None
assert token.salt is not None
assert token.ttl == RedisEphemeralTokens.EXPIRATION_TTL
assert token.scope == token.get_scope('user', 'experiment', 1)
assert token.check_token('foo') is False
# Checking delete the token
assert token.get_state() is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
assert token.check_token(None) is False
# Checking delete the token
assert token.get_state() is None
token = RedisEphemeralTokens.generate(scope=token.get_scope('user', 'experiment', 1))
valid = RedisEphemeralTokens.make_token(token)
assert token.check_token(valid) is True
# Checking delete the token
assert token.get_state() is None
assert token.salt is None
assert token.ttl is None
assert token.scope is None
|
|
193e2d520ead6aef22997de2f7ebbcc9b7cec587
|
pandachecker.py
|
pandachecker.py
|
"""
Program will ping the list of website to see if they are online.
Will give an error message if the website is down.
"""
import subprocess
addressListA = ['baseness.com',
'npr.org',
'ucsc.edu',
'google.com',
'facebook.com',
'myeconlab.com',
'instagram.com',
'wikipedia.com',
'nytimes.com', 'netflix.com'
];
# subprocess.run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, cwd=None, timeout=None, check=False, encoding=None, errors=None)
for anAddress in addressListA:
subprocess.run(["ping", "-c", "1", anAddress ], check=True)
subprocess.check_output(["echo", "Hello World!"])
print('please work')
|
Add python code for pinging
|
Add python code for pinging
|
Python
|
unlicense
|
ahandsel/pandachecker
|
Add python code for pinging
|
"""
Program will ping the list of website to see if they are online.
Will give an error message if the website is down.
"""
import subprocess
addressListA = ['baseness.com',
'npr.org',
'ucsc.edu',
'google.com',
'facebook.com',
'myeconlab.com',
'instagram.com',
'wikipedia.com',
'nytimes.com', 'netflix.com'
];
# subprocess.run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, cwd=None, timeout=None, check=False, encoding=None, errors=None)
for anAddress in addressListA:
subprocess.run(["ping", "-c", "1", anAddress ], check=True)
subprocess.check_output(["echo", "Hello World!"])
print('please work')
|
<commit_before><commit_msg>Add python code for pinging<commit_after>
|
"""
Program will ping the list of website to see if they are online.
Will give an error message if the website is down.
"""
import subprocess
addressListA = ['baseness.com',
'npr.org',
'ucsc.edu',
'google.com',
'facebook.com',
'myeconlab.com',
'instagram.com',
'wikipedia.com',
'nytimes.com', 'netflix.com'
];
# subprocess.run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, cwd=None, timeout=None, check=False, encoding=None, errors=None)
for anAddress in addressListA:
subprocess.run(["ping", "-c", "1", anAddress ], check=True)
subprocess.check_output(["echo", "Hello World!"])
print('please work')
|
Add python code for pinging
"""
Program will ping the list of website to see if they are online.
Will give an error message if the website is down.
"""
import subprocess
addressListA = ['baseness.com',
'npr.org',
'ucsc.edu',
'google.com',
'facebook.com',
'myeconlab.com',
'instagram.com',
'wikipedia.com',
'nytimes.com', 'netflix.com'
];
# subprocess.run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, cwd=None, timeout=None, check=False, encoding=None, errors=None)
for anAddress in addressListA:
subprocess.run(["ping", "-c", "1", anAddress ], check=True)
subprocess.check_output(["echo", "Hello World!"])
print('please work')
|
<commit_before><commit_msg>Add python code for pinging<commit_after>"""
Program will ping the list of website to see if they are online.
Will give an error message if the website is down.
"""
import subprocess
addressListA = ['baseness.com',
'npr.org',
'ucsc.edu',
'google.com',
'facebook.com',
'myeconlab.com',
'instagram.com',
'wikipedia.com',
'nytimes.com', 'netflix.com'
];
# subprocess.run(args, *, stdin=None, input=None, stdout=None, stderr=None, shell=False, cwd=None, timeout=None, check=False, encoding=None, errors=None)
for anAddress in addressListA:
subprocess.run(["ping", "-c", "1", anAddress ], check=True)
subprocess.check_output(["echo", "Hello World!"])
print('please work')
|
|
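An illustrative aside on the pandachecker record above: because check=True is passed to subprocess.run, the first unreachable host raises CalledProcessError and aborts the loop before the remaining sites are pinged. A minimal sketch of per-host error handling, assuming the same one-packet ping invocation; the host list here is a made-up subset, not the record's list.
import subprocess
hosts = ['npr.org', 'google.com', 'wikipedia.com']  # illustrative subset only
for host in hosts:
    try:
        # one ICMP echo request per host, as in the original script
        subprocess.run(['ping', '-c', '1', host], check=True,
                       stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        print(host, 'is up')
    except subprocess.CalledProcessError:
        # ping exits non-zero when the host is unreachable; report it and keep going
        print(host, 'appears to be down')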
d16a3753d73714a51fbe846e45fe77d5e41cb2ab
|
examples/dup_and_replay.py
|
examples/dup_and_replay.py
|
from mitmproxy import ctx
def request(flow):
f = ctx.master.state.duplicate_flow(flow)
f.request.path = "/changed"
ctx.master.replay_request(f, block=True, run_scripthooks=False)
|
from mitmproxy import ctx
def request(flow):
f = ctx.master.state.duplicate_flow(flow)
f.request.path = "/changed"
ctx.master.replay_request(f, block=True)
|
Remove dead run_scripthooks example reference.
|
Remove dead run_scripthooks example reference.
|
Python
|
mit
|
xaxa89/mitmproxy,Kriechi/mitmproxy,cortesi/mitmproxy,zlorb/mitmproxy,dwfreed/mitmproxy,ujjwal96/mitmproxy,vhaupert/mitmproxy,mosajjal/mitmproxy,laurmurclar/mitmproxy,ujjwal96/mitmproxy,Kriechi/mitmproxy,xaxa89/mitmproxy,MatthewShao/mitmproxy,dwfreed/mitmproxy,mosajjal/mitmproxy,mitmproxy/mitmproxy,ddworken/mitmproxy,mosajjal/mitmproxy,mhils/mitmproxy,mitmproxy/mitmproxy,mosajjal/mitmproxy,mitmproxy/mitmproxy,zlorb/mitmproxy,xaxa89/mitmproxy,MatthewShao/mitmproxy,dwfreed/mitmproxy,mitmproxy/mitmproxy,mitmproxy/mitmproxy,Kriechi/mitmproxy,StevenVanAcker/mitmproxy,vhaupert/mitmproxy,vhaupert/mitmproxy,laurmurclar/mitmproxy,MatthewShao/mitmproxy,cortesi/mitmproxy,ddworken/mitmproxy,vhaupert/mitmproxy,dwfreed/mitmproxy,zlorb/mitmproxy,mhils/mitmproxy,ddworken/mitmproxy,cortesi/mitmproxy,ujjwal96/mitmproxy,laurmurclar/mitmproxy,xaxa89/mitmproxy,StevenVanAcker/mitmproxy,cortesi/mitmproxy,ddworken/mitmproxy,laurmurclar/mitmproxy,mhils/mitmproxy,zlorb/mitmproxy,StevenVanAcker/mitmproxy,mhils/mitmproxy,mhils/mitmproxy,StevenVanAcker/mitmproxy,Kriechi/mitmproxy,ujjwal96/mitmproxy,MatthewShao/mitmproxy
|
from mitmproxy import ctx
def request(flow):
f = ctx.master.state.duplicate_flow(flow)
f.request.path = "/changed"
ctx.master.replay_request(f, block=True, run_scripthooks=False)
Remove dead run_scripthooks example reference.
|
from mitmproxy import ctx
def request(flow):
f = ctx.master.state.duplicate_flow(flow)
f.request.path = "/changed"
ctx.master.replay_request(f, block=True)
|
<commit_before>from mitmproxy import ctx
def request(flow):
f = ctx.master.state.duplicate_flow(flow)
f.request.path = "/changed"
ctx.master.replay_request(f, block=True, run_scripthooks=False)
<commit_msg>Remove dead run_scripthooks example reference.<commit_after>
|
from mitmproxy import ctx
def request(flow):
f = ctx.master.state.duplicate_flow(flow)
f.request.path = "/changed"
ctx.master.replay_request(f, block=True)
|
from mitmproxy import ctx
def request(flow):
f = ctx.master.state.duplicate_flow(flow)
f.request.path = "/changed"
ctx.master.replay_request(f, block=True, run_scripthooks=False)
Remove dead run_scripthooks example reference.
from mitmproxy import ctx
def request(flow):
f = ctx.master.state.duplicate_flow(flow)
f.request.path = "/changed"
ctx.master.replay_request(f, block=True)
|
<commit_before>from mitmproxy import ctx
def request(flow):
f = ctx.master.state.duplicate_flow(flow)
f.request.path = "/changed"
ctx.master.replay_request(f, block=True, run_scripthooks=False)
<commit_msg>Remove dead run_scripthooks example reference.<commit_after>from mitmproxy import ctx
def request(flow):
f = ctx.master.state.duplicate_flow(flow)
f.request.path = "/changed"
ctx.master.replay_request(f, block=True)
|
66ea7be70d37c8431d6daef976c6d5c9a7407ea0
|
examples/example_injury.py
|
examples/example_injury.py
|
#!/usr/bin/env python
from tabulate import tabulate
from mlbgame import injury
import dateutil.parser
from datetime import datetime
team_id = 117 # Houston Astros
i = injury.Injury(team_id)
injuries = []
for inj in i.injuries:
team = inj.team_name
injury = ['{0}, {1} ({2})'.format(inj.name_last, inj.name_first,
inj.position), inj.insert_ts, inj.injury_status, inj.due_back,
inj.injury_desc, inj.injury_update]
injuries.append(injury)
print tabulate(injuries, headers=[team, 'Updated', 'Status', 'Due Back',
'Injury', 'Notes'])
print
print 'Last Updated: %s' % i.last_update
"""
"""
|
Add example for injury class
|
Add example for injury class
|
Python
|
mit
|
panzarino/mlbgame,zachpanz88/mlbgame
|
Add example for injury class
|
#!/usr/bin/env python
from tabulate import tabulate
from mlbgame import injury
import dateutil.parser
from datetime import datetime
team_id = 117 # Houston Astros
i = injury.Injury(team_id)
injuries = []
for inj in i.injuries:
team = inj.team_name
injury = ['{0}, {1} ({2})'.format(inj.name_last, inj.name_first,
inj.position), inj.insert_ts, inj.injury_status, inj.due_back,
inj.injury_desc, inj.injury_update]
injuries.append(injury)
print tabulate(injuries, headers=[team, 'Updated', 'Status', 'Due Back',
'Injury', 'Notes'])
print
print 'Last Updated: %s' % i.last_update
"""
"""
|
<commit_before><commit_msg>Add example for injury class<commit_after>
|
#!/usr/bin/env python
from tabulate import tabulate
from mlbgame import injury
import dateutil.parser
from datetime import datetime
team_id = 117 # Houston Astros
i = injury.Injury(team_id)
injuries = []
for inj in i.injuries:
team = inj.team_name
injury = ['{0}, {1} ({2})'.format(inj.name_last, inj.name_first,
inj.position), inj.insert_ts, inj.injury_status, inj.due_back,
inj.injury_desc, inj.injury_update]
injuries.append(injury)
print tabulate(injuries, headers=[team, 'Updated', 'Status', 'Due Back',
'Injury', 'Notes'])
print
print 'Last Updated: %s' % i.last_update
"""
"""
|
Add example for injury class
#!/usr/bin/env python
from tabulate import tabulate
from mlbgame import injury
import dateutil.parser
from datetime import datetime
team_id = 117 # Houston Astros
i = injury.Injury(team_id)
injuries = []
for inj in i.injuries:
team = inj.team_name
injury = ['{0}, {1} ({2})'.format(inj.name_last, inj.name_first,
inj.position), inj.insert_ts, inj.injury_status, inj.due_back,
inj.injury_desc, inj.injury_update]
injuries.append(injury)
print tabulate(injuries, headers=[team, 'Updated', 'Status', 'Due Back',
'Injury', 'Notes'])
print
print 'Last Updated: %s' % i.last_update
"""
"""
|
<commit_before><commit_msg>Add example for injury class<commit_after>#!/usr/bin/env python
from tabulate import tabulate
from mlbgame import injury
import dateutil.parser
from datetime import datetime
team_id = 117 # Houston Astros
i = injury.Injury(team_id)
injuries = []
for inj in i.injuries:
team = inj.team_name
injury = ['{0}, {1} ({2})'.format(inj.name_last, inj.name_first,
inj.position), inj.insert_ts, inj.injury_status, inj.due_back,
inj.injury_desc, inj.injury_update]
injuries.append(injury)
print tabulate(injuries, headers=[team, 'Updated', 'Status', 'Due Back',
'Injury', 'Notes'])
print
print 'Last Updated: %s' % i.last_update
"""
"""
|
|
68009fdf18081033609bc1f98afc1b7e6065c35d
|
examples/get_yara_rules.py
|
examples/get_yara_rules.py
|
import urllib2
import socket
from OTXv2 import OTXv2
from OTXv2 import OTXv2, IndicatorTypes
otx = OTXv2('API_KEY')
pulses = otx.getall()
for i in range(0,len(pulses)-1):
print ("// https://otx.alienvault.com/pulse/" + pulses[i]["id"])
indicators = pulses[i]["indicators"]
for ind in indicators:
if ind['type'] == "YARA":
print(ind['content'])
|
Add get yara rules script
|
Add get yara rules script
|
Python
|
apache-2.0
|
AlienVault-Labs/OTX-Python-SDK
|
Add get yara rules script
|
import urllib2
import socket
from OTXv2 import OTXv2
from OTXv2 import OTXv2, IndicatorTypes
otx = OTXv2('API_KEY')
pulses = otx.getall()
for i in range(0,len(pulses)-1):
print ("// https://otx.alienvault.com/pulse/" + pulses[i]["id"])
indicators = pulses[i]["indicators"]
for ind in indicators:
if ind['type'] == "YARA":
print(ind['content'])
|
<commit_before><commit_msg>Add get yara rules script<commit_after>
|
import urllib2
import socket
from OTXv2 import OTXv2
from OTXv2 import OTXv2, IndicatorTypes
otx = OTXv2('API_KEY')
pulses = otx.getall()
for i in range(0,len(pulses)-1):
print ("// https://otx.alienvault.com/pulse/" + pulses[i]["id"])
indicators = pulses[i]["indicators"]
for ind in indicators:
if ind['type'] == "YARA":
print(ind['content'])
|
Add get yara rules script
import urllib2
import socket
from OTXv2 import OTXv2
from OTXv2 import OTXv2, IndicatorTypes
otx = OTXv2('API_KEY')
pulses = otx.getall()
for i in range(0,len(pulses)-1):
print ("// https://otx.alienvault.com/pulse/" + pulses[i]["id"])
indicators = pulses[i]["indicators"]
for ind in indicators:
if ind['type'] == "YARA":
print(ind['content'])
|
<commit_before><commit_msg>Add get yara rules script<commit_after>import urllib2
import socket
from OTXv2 import OTXv2
from OTXv2 import OTXv2, IndicatorTypes
otx = OTXv2('API_KEY')
pulses = otx.getall()
for i in range(0,len(pulses)-1):
print ("// https://otx.alienvault.com/pulse/" + pulses[i]["id"])
indicators = pulses[i]["indicators"]
for ind in indicators:
if ind['type'] == "YARA":
print(ind['content'])
|
|
ddf8b393a0695b71400a35a1a218c734a9bc669c
|
regscrape/regs_common/data_import.py
|
regscrape/regs_common/data_import.py
|
import pymongo
import gridfs
import settings
def copy_data(source_db_name, dest_db_name, query):
source = pymongo.Connection(**settings.DB_SETTINGS)[source_db_name]
dest = pymongo.Connection(**settings.DB_SETTINGS)[dest_db_name]
source_gridfs = gridfs.GridFS(source, collection='files')
dest_gridfs = gridfs.GridFS(dest, collection='files')
for doc in source.docs.find(query):
print 'Copying doc %s...' % doc['_id']
# flip some flags
doc['stats'] = {}
doc['in_aggregates'] = False
doc['in_cluster_db'] = False
doc['in_search_index'] = False
dest.docs.save(doc)
file_ids = []
for view in doc.get('views', []):
if view.get('content', None):
file_ids.append(view['content'])
for attachment in doc.get('attachments', []):
for view in attachment.get('views', []):
if view.get('content', None):
file_ids.append(view['content'])
for fid in file_ids:
print "Copying file %s" % fid
# delete out of the dest in case it's already there
dest_gridfs.delete(fid)
# then read out from the old one
fdata = source_gridfs.get(fid).read()
# ... and write to the new one
dest_gridfs.put(fdata, _id=fid)
print "Done."
dkt_query = dict(query)
if "docket_id" in dkt_query:
dkt_query['_id'] = dkt_query['docket_id']
del dkt_query['docket_id']
for dkt in source.dockets.find(dkt_query):
print 'Copying docket %s...' % dkt['_id']
# flip some flags
dkt['stats'] = {}
dkt['in_search_index'] = False
if 'source' not in dkt:
dkt['source'] = 'regulations.gov'
dest.dockets.save(dkt)
print "Done."
|
Add a utility for copying data from one database to another.
|
Add a utility for copying data from one database to another.
|
Python
|
bsd-3-clause
|
sunlightlabs/regulations-scraper,sunlightlabs/regulations-scraper,sunlightlabs/regulations-scraper
|
Add a utility for copying data from one database to another.
|
import pymongo
import gridfs
import settings
def copy_data(source_db_name, dest_db_name, query):
source = pymongo.Connection(**settings.DB_SETTINGS)[source_db_name]
dest = pymongo.Connection(**settings.DB_SETTINGS)[dest_db_name]
source_gridfs = gridfs.GridFS(source, collection='files')
dest_gridfs = gridfs.GridFS(dest, collection='files')
for doc in source.docs.find(query):
print 'Copying doc %s...' % doc['_id']
# flip some flags
doc['stats'] = {}
doc['in_aggregates'] = False
doc['in_cluster_db'] = False
doc['in_search_index'] = False
dest.docs.save(doc)
file_ids = []
for view in doc.get('views', []):
if view.get('content', None):
file_ids.append(view['content'])
for attachment in doc.get('attachments', []):
for view in attachment.get('views', []):
if view.get('content', None):
file_ids.append(view['content'])
for fid in file_ids:
print "Copying file %s" % fid
# delete out of the dest in case it's already there
dest_gridfs.delete(fid)
# then read out from the old one
fdata = source_gridfs.get(fid).read()
# ... and write to the new one
dest_gridfs.put(fdata, _id=fid)
print "Done."
dkt_query = dict(query)
if "docket_id" in dkt_query:
dkt_query['_id'] = dkt_query['docket_id']
del dkt_query['docket_id']
for dkt in source.dockets.find(dkt_query):
print 'Copying docket %s...' % dkt['_id']
# flip some flags
dkt['stats'] = {}
dkt['in_search_index'] = False
if 'source' not in dkt:
dkt['source'] = 'regulations.gov'
dest.dockets.save(dkt)
print "Done."
|
<commit_before><commit_msg>Add a utility for copying data from one database to another.<commit_after>
|
import pymongo
import gridfs
import settings
def copy_data(source_db_name, dest_db_name, query):
source = pymongo.Connection(**settings.DB_SETTINGS)[source_db_name]
dest = pymongo.Connection(**settings.DB_SETTINGS)[dest_db_name]
source_gridfs = gridfs.GridFS(source, collection='files')
dest_gridfs = gridfs.GridFS(dest, collection='files')
for doc in source.docs.find(query):
print 'Copying doc %s...' % doc['_id']
# flip some flags
doc['stats'] = {}
doc['in_aggregates'] = False
doc['in_cluster_db'] = False
doc['in_search_index'] = False
dest.docs.save(doc)
file_ids = []
for view in doc.get('views', []):
if view.get('content', None):
file_ids.append(view['content'])
for attachment in doc.get('attachments', []):
for view in attachment.get('views', []):
if view.get('content', None):
file_ids.append(view['content'])
for fid in file_ids:
print "Copying file %s" % fid
# delete out of the dest in case it's already there
dest_gridfs.delete(fid)
# then read out from the old one
fdata = source_gridfs.get(fid).read()
# ... and write to the new one
dest_gridfs.put(fdata, _id=fid)
print "Done."
dkt_query = dict(query)
if "docket_id" in dkt_query:
dkt_query['_id'] = dkt_query['docket_id']
del dkt_query['docket_id']
for dkt in source.dockets.find(dkt_query):
print 'Copying docket %s...' % dkt['_id']
# flip some flags
dkt['stats'] = {}
dkt['in_search_index'] = False
if 'source' not in dkt:
dkt['source'] = 'regulations.gov'
dest.dockets.save(dkt)
print "Done."
|
Add a utility for copying data from one database to another.
import pymongo
import gridfs
import settings
def copy_data(source_db_name, dest_db_name, query):
source = pymongo.Connection(**settings.DB_SETTINGS)[source_db_name]
dest = pymongo.Connection(**settings.DB_SETTINGS)[dest_db_name]
source_gridfs = gridfs.GridFS(source, collection='files')
dest_gridfs = gridfs.GridFS(dest, collection='files')
for doc in source.docs.find(query):
print 'Copying doc %s...' % doc['_id']
# flip some flags
doc['stats'] = {}
doc['in_aggregates'] = False
doc['in_cluster_db'] = False
doc['in_search_index'] = False
dest.docs.save(doc)
file_ids = []
for view in doc.get('views', []):
if view.get('content', None):
file_ids.append(view['content'])
for attachment in doc.get('attachments', []):
for view in attachment.get('views', []):
if view.get('content', None):
file_ids.append(view['content'])
for fid in file_ids:
print "Copying file %s" % fid
# delete out of the dest in case it's already there
dest_gridfs.delete(fid)
# then read out from the old one
fdata = source_gridfs.get(fid).read()
# ... and write to the new one
dest_gridfs.put(fdata, _id=fid)
print "Done."
dkt_query = dict(query)
if "docket_id" in dkt_query:
dkt_query['_id'] = dkt_query['docket_id']
del dkt_query['docket_id']
for dkt in source.dockets.find(dkt_query):
print 'Copying docket %s...' % dkt['_id']
# flip some flags
dkt['stats'] = {}
dkt['in_search_index'] = False
if 'source' not in dkt:
dkt['source'] = 'regulations.gov'
dest.dockets.save(dkt)
print "Done."
|
<commit_before><commit_msg>Add a utility for copying data from one database to another.<commit_after>import pymongo
import gridfs
import settings
def copy_data(source_db_name, dest_db_name, query):
source = pymongo.Connection(**settings.DB_SETTINGS)[source_db_name]
dest = pymongo.Connection(**settings.DB_SETTINGS)[dest_db_name]
source_gridfs = gridfs.GridFS(source, collection='files')
dest_gridfs = gridfs.GridFS(dest, collection='files')
for doc in source.docs.find(query):
print 'Copying doc %s...' % doc['_id']
# flip some flags
doc['stats'] = {}
doc['in_aggregates'] = False
doc['in_cluster_db'] = False
doc['in_search_index'] = False
dest.docs.save(doc)
file_ids = []
for view in doc.get('views', []):
if view.get('content', None):
file_ids.append(view['content'])
for attachment in doc.get('attachments', []):
for view in attachment.get('views', []):
if view.get('content', None):
file_ids.append(view['content'])
for fid in file_ids:
print "Copying file %s" % fid
# delete out of the dest in case it's already there
dest_gridfs.delete(fid)
# then read out from the old one
fdata = source_gridfs.get(fid).read()
# ... and write to the new one
dest_gridfs.put(fdata, _id=fid)
print "Done."
dkt_query = dict(query)
if "docket_id" in dkt_query:
dkt_query['_id'] = dkt_query['docket_id']
del dkt_query['docket_id']
for dkt in source.dockets.find(dkt_query):
print 'Copying docket %s...' % dkt['_id']
# flip some flags
dkt['stats'] = {}
dkt['in_search_index'] = False
if 'source' not in dkt:
dkt['source'] = 'regulations.gov'
dest.dockets.save(dkt)
print "Done."
|
|
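An illustrative aside on the data_import record above: the module only defines copy_data and never invokes it, so a hypothetical call may help; the import path is assumed from the file location, and the database names and query below are invented examples rather than values from the regulations-scraper project.
from regs_common.data_import import copy_data  # import path assumed from regscrape/regs_common/data_import.py
# Copy one docket's documents, GridFS files and docket metadata between databases.
# 'regulations_old', 'regulations_new' and the docket id are purely illustrative.
copy_data('regulations_old', 'regulations_new', {'docket_id': 'EPA-HQ-OAR-2009-0234'})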
6354d6a7cbfa253562f983a9002827af2b3cc819
|
csv_insert_script.py
|
csv_insert_script.py
|
#!/usr/bin/env python2.7
import csv
import sqlite3
conn = sqlite3.connect("twdb.db")
cur = conn.cursor()
file = open('stgcd_manual.csv', 'r')
reader = csv.reader(file)
readings = []
twdb_nums = []
for row in reader:
readings.append(row)
if (row[0] not in twdb_nums):
twdb_nums.append(row[0])
readings.pop(0)
twdb_nums.pop(0)
for twdb in twdb_nums:
cur.execute("select owner_1, owner_2 from twdb_weldta where " +
"state_well_number = ?", [twdb])
row = cur.fetchone()
print ("insert into well_sites_manual (twdb, name) values (" + str(twdb) +
", '" + str(str(row[0]) + " " + str(row[1])).strip() + "');")
for reading in readings:
print("insert into well_readings_manual (twdb_id, date, depth_to_water) " +
"values (" + str(reading[0]) + ", '" + str(reading[1]) + "', " +
str(reading[2]) + ");")
|
Add script to generate SQL script for STGCD manual readings.
|
Add script to generate SQL script for STGCD manual readings.
|
Python
|
isc
|
wablair/misc_scripts,wablair/misc_scripts,wablair/misc_scripts,wablair/misc_scripts
|
Add script to generate SQL script for STGCD manual readings.
|
#!/usr/bin/env python2.7
import csv
import sqlite3
conn = sqlite3.connect("twdb.db")
cur = conn.cursor()
file = open('stgcd_manual.csv', 'r')
reader = csv.reader(file)
readings = []
twdb_nums = []
for row in reader:
readings.append(row)
if (row[0] not in twdb_nums):
twdb_nums.append(row[0])
readings.pop(0)
twdb_nums.pop(0)
for twdb in twdb_nums:
cur.execute("select owner_1, owner_2 from twdb_weldta where " +
"state_well_number = ?", [twdb])
row = cur.fetchone()
print ("insert into well_sites_manual (twdb, name) values (" + str(twdb) +
", '" + str(str(row[0]) + " " + str(row[1])).strip() + "');")
for reading in readings:
print("insert into well_readings_manual (twdb_id, date, depth_to_water) " +
"values (" + str(reading[0]) + ", '" + str(reading[1]) + "', " +
str(reading[2]) + ");")
|
<commit_before><commit_msg>Add script to generate SQL script for STGCD manual readings.<commit_after>
|
#!/usr/bin/env python2.7
import csv
import sqlite3
conn = sqlite3.connect("twdb.db")
cur = conn.cursor()
file = open('stgcd_manual.csv', 'r')
reader = csv.reader(file)
readings = []
twdb_nums = []
for row in reader:
readings.append(row)
if (row[0] not in twdb_nums):
twdb_nums.append(row[0])
readings.pop(0)
twdb_nums.pop(0)
for twdb in twdb_nums:
cur.execute("select owner_1, owner_2 from twdb_weldta where " +
"state_well_number = ?", [twdb])
row = cur.fetchone()
print ("insert into well_sites_manual (twdb, name) values (" + str(twdb) +
", '" + str(str(row[0]) + " " + str(row[1])).strip() + "');")
for reading in readings:
print("insert into well_readings_manual (twdb_id, date, depth_to_water) " +
"values (" + str(reading[0]) + ", '" + str(reading[1]) + "', " +
str(reading[2]) + ");")
|
Add script to generate SQL script for STGCD manual readings.
#!/usr/bin/env python2.7
import csv
import sqlite3
conn = sqlite3.connect("twdb.db")
cur = conn.cursor()
file = open('stgcd_manual.csv', 'r')
reader = csv.reader(file)
readings = []
twdb_nums = []
for row in reader:
readings.append(row)
if (row[0] not in twdb_nums):
twdb_nums.append(row[0])
readings.pop(0)
twdb_nums.pop(0)
for twdb in twdb_nums:
cur.execute("select owner_1, owner_2 from twdb_weldta where " +
"state_well_number = ?", [twdb])
row = cur.fetchone()
print ("insert into well_sites_manual (twdb, name) values (" + str(twdb) +
", '" + str(str(row[0]) + " " + str(row[1])).strip() + "');")
for reading in readings:
print("insert into well_readings_manual (twdb_id, date, depth_to_water) " +
"values (" + str(reading[0]) + ", '" + str(reading[1]) + "', " +
str(reading[2]) + ");")
|
<commit_before><commit_msg>Add script to generate SQL script for STGCD manual readings.<commit_after>#!/usr/bin/env python2.7
import csv
import sqlite3
conn = sqlite3.connect("twdb.db")
cur = conn.cursor()
file = open('stgcd_manual.csv', 'r')
reader = csv.reader(file)
readings = []
twdb_nums = []
for row in reader:
readings.append(row)
if (row[0] not in twdb_nums):
twdb_nums.append(row[0])
readings.pop(0)
twdb_nums.pop(0)
for twdb in twdb_nums:
cur.execute("select owner_1, owner_2 from twdb_weldta where " +
"state_well_number = ?", [twdb])
row = cur.fetchone()
print ("insert into well_sites_manual (twdb, name) values (" + str(twdb) +
", '" + str(str(row[0]) + " " + str(row[1])).strip() + "');")
for reading in readings:
print("insert into well_readings_manual (twdb_id, date, depth_to_water) " +
"values (" + str(reading[0]) + ", '" + str(reading[1]) + "', " +
str(reading[2]) + ");")
|
|
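An illustrative aside on the csv_insert_script record above: the generated INSERT statements splice values straight into the SQL text, so an owner name containing an apostrophe would yield broken (or unsafe) SQL. A sketch of the parameterized alternative, assuming the same table and column names; sqlite3 performs the quoting itself. The sample row is invented for illustration.
import sqlite3
conn = sqlite3.connect('twdb.db')
cur = conn.cursor()
# (twdb_id, date, depth_to_water) rows, e.g. parsed from the CSV
readings = [('5745301', '2015-03-02', '123.4')]
cur.executemany(
    'insert into well_readings_manual (twdb_id, date, depth_to_water) values (?, ?, ?)',
    readings)
conn.commit()
conn.close()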
3f09462f3be6f7d59d36ffaf5e4dae4be74b623b
|
private/realclearpolitics-scraper/scraper.py
|
private/realclearpolitics-scraper/scraper.py
|
import sys, os
args = sys.argv
if (len(args) <= 1):
print("ERROR: Please provide source url")
print("Example : python scraper.py url output.csv")
else:
url = args[1]
if (len(args) == 2):
filename = url.split('/')[-1].split('.')[0]
output = filename + ".csv"
print("No output file specified : using " + output)
else:
output = sys.argv[2]
if not output.endswith(".csv"):
output = output + ".csv"
if os.path.isfile(output):
os.remove(output)
os.system("scrapy crawl realclearpoliticsSpider -a url="+url+" -o "+output)
|
import sys, os
args = sys.argv
if (len(args) <= 1):
raise ValueError('Please provide source url when calling scraper.py. Example : python scraper.py url output.csv')
else:
url = args[1]
if (len(args) == 2):
filename = url.split('/')[-1].split('.')[0]
output = filename + ".csv"
print("No output file specified : using " + output)
else:
output = sys.argv[2]
if not output.endswith(".csv"):
output = output + ".csv"
if os.path.isfile(output):
os.remove(output)
os.system("scrapy crawl realclearpoliticsSpider -a url="+url+" -o "+output)
|
Raise exception when script called with wrong args
|
Raise exception when script called with wrong args
(instead of just printing)
|
Python
|
mit
|
Rumel/berniemetrics,Rumel/berniemetrics,Rumel/berniemetrics,dpxxdp/berniemetrics,Rumel/berniemetrics,fpagnoux/berniemetrics,fpagnoux/berniemetrics,fpagnoux/berniemetrics,dpxxdp/berniemetrics,fpagnoux/berniemetrics,dpxxdp/berniemetrics,dpxxdp/berniemetrics
|
import sys, os
args = sys.argv
if (len(args) <= 1):
print("ERROR: Please provide source url")
print("Example : python scraper.py url output.csv")
else:
url = args[1]
if (len(args) == 2):
filename = url.split('/')[-1].split('.')[0]
output = filename + ".csv"
print("No output file specified : using " + output)
else:
output = sys.argv[2]
if not output.endswith(".csv"):
output = output + ".csv"
if os.path.isfile(output):
os.remove(output)
os.system("scrapy crawl realclearpoliticsSpider -a url="+url+" -o "+output)
Raise exception when script called with wrong args
(instead of just printing)
|
import sys, os
args = sys.argv
if (len(args) <= 1):
raise ValueError('Please provide source url when calling scraper.py. Example : python scraper.py url output.csv')
else:
url = args[1]
if (len(args) == 2):
filename = url.split('/')[-1].split('.')[0]
output = filename + ".csv"
print("No output file specified : using " + output)
else:
output = sys.argv[2]
if not output.endswith(".csv"):
output = output + ".csv"
if os.path.isfile(output):
os.remove(output)
os.system("scrapy crawl realclearpoliticsSpider -a url="+url+" -o "+output)
|
<commit_before>import sys, os
args = sys.argv
if (len(args) <= 1):
print("ERROR: Please provide source url")
print("Example : python scraper.py url output.csv")
else:
url = args[1]
if (len(args) == 2):
filename = url.split('/')[-1].split('.')[0]
output = filename + ".csv"
print("No output file specified : using " + output)
else:
output = sys.argv[2]
if not output.endswith(".csv"):
output = output + ".csv"
if os.path.isfile(output):
os.remove(output)
os.system("scrapy crawl realclearpoliticsSpider -a url="+url+" -o "+output)
<commit_msg>Raise exception when script called with wrong args
(instead of just printing)<commit_after>
|
import sys, os
args = sys.argv
if (len(args) <= 1):
raise ValueError('Please provide source url when calling scraper.py. Example : python scraper.py url output.csv')
else:
url = args[1]
if (len(args) == 2):
filename = url.split('/')[-1].split('.')[0]
output = filename + ".csv"
print("No output file specified : using " + output)
else:
output = sys.argv[2]
if not output.endswith(".csv"):
output = output + ".csv"
if os.path.isfile(output):
os.remove(output)
os.system("scrapy crawl realclearpoliticsSpider -a url="+url+" -o "+output)
|
import sys, os
args = sys.argv
if (len(args) <= 1):
print("ERROR: Please provide source url")
print("Example : python scraper.py url output.csv")
else:
url = args[1]
if (len(args) == 2):
filename = url.split('/')[-1].split('.')[0]
output = filename + ".csv"
print("No output file specified : using " + output)
else:
output = sys.argv[2]
if not output.endswith(".csv"):
output = output + ".csv"
if os.path.isfile(output):
os.remove(output)
os.system("scrapy crawl realclearpoliticsSpider -a url="+url+" -o "+output)
Raise exception when script called with wrong args
(instead of just printing)
import sys, os
args = sys.argv
if (len(args) <= 1):
raise ValueError('Please provide source url when calling scraper.py. Example : python scraper.py url output.csv')
else:
url = args[1]
if (len(args) == 2):
filename = url.split('/')[-1].split('.')[0]
output = filename + ".csv"
print("No output file specified : using " + output)
else:
output = sys.argv[2]
if not output.endswith(".csv"):
output = output + ".csv"
if os.path.isfile(output):
os.remove(output)
os.system("scrapy crawl realclearpoliticsSpider -a url="+url+" -o "+output)
|
<commit_before>import sys, os
args = sys.argv
if (len(args) <= 1):
print("ERROR: Please provide source url")
print("Example : python scraper.py url output.csv")
else:
url = args[1]
if (len(args) == 2):
filename = url.split('/')[-1].split('.')[0]
output = filename + ".csv"
print("No output file specified : using " + output)
else:
output = sys.argv[2]
if not output.endswith(".csv"):
output = output + ".csv"
if os.path.isfile(output):
os.remove(output)
os.system("scrapy crawl realclearpoliticsSpider -a url="+url+" -o "+output)
<commit_msg>Raise exception when script called with wrong args
(instead of just printing)<commit_after>import sys, os
args = sys.argv
if (len(args) <= 1):
raise ValueError('Please provide source url when calling scraper.py. Example : python scraper.py url output.csv')
else:
url = args[1]
if (len(args) == 2):
filename = url.split('/')[-1].split('.')[0]
output = filename + ".csv"
print("No output file specified : using " + output)
else:
output = sys.argv[2]
if not output.endswith(".csv"):
output = output + ".csv"
if os.path.isfile(output):
os.remove(output)
os.system("scrapy crawl realclearpoliticsSpider -a url="+url+" -o "+output)
|
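An illustrative aside on the scraper record above: the same url/output handling can be sketched with argparse, which produces the usage message and the missing-argument error automatically; the spider name is taken from the script, and os.system is kept only to mirror it.
import argparse
import os
parser = argparse.ArgumentParser(description='Scrape a realclearpolitics page into a CSV file')
parser.add_argument('url', help='source url')
parser.add_argument('output', nargs='?', help='output csv; defaults to the last path segment of the url')
args = parser.parse_args()
output = args.output or args.url.split('/')[-1].split('.')[0]
if not output.endswith('.csv'):
    output += '.csv'
if os.path.isfile(output):
    os.remove(output)
os.system('scrapy crawl realclearpoliticsSpider -a url=' + args.url + ' -o ' + output)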
f536adf23907282aa26c6b844dc6875d5412553d
|
raw2obj.py
|
raw2obj.py
|
import numpy as np
import scipy.misc
import mcubes
import argparse
from sklearn.neighbors import NearestNeighbors
parser = argparse.ArgumentParser(description='Visualise the 3D volume')
parser.add_argument('--image', dest='image',
help="The background image to display")
parser.add_argument('--volume', dest='volume',
help="The volume to render")
parser.add_argument('--obj', dest='obj',
help="The file path of the object")
args = parser.parse_args()
im = scipy.misc.imread(args.image, False, 'RGB')
vol = np.fromfile(args.volume, dtype=np.int8)
vol = vol.reshape((200,192,192))
vol = vol.astype(float)
vertices, triangles = mcubes.marching_cubes(vol, 10)
vertices = vertices[:,(2,1,0)]
vertices[:,2] *= 0.5 # scale the Z component correctly
r = im[:,:,0].flatten()
g = im[:,:,1].flatten()
b = im[:,:,2].flatten()
vcx,vcy = np.meshgrid(np.arange(0,192),np.arange(0,192))
vcx = vcx.flatten()
vcy = vcy.flatten()
vc = np.vstack((vcx, vcy, r, g, b)).transpose()
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(vc[:,:2])
n = neigh.kneighbors(vertices[:,(0,1)], return_distance=False)
colour = vc[n,2:].reshape((vertices.shape[0],3)).astype(float) / 255
vc = np.hstack((vertices, colour))
with open(args.obj, 'w') as f:
for v in range(0,vc.shape[0]):
f.write('v %0.2f %0.2f %0.2f %0.2f %0.2f %0.2f\n' % (vc[v,0],vc[v,1],vc[v,2],vc[v,3],vc[v,4],vc[v,5]))
for t in range(0,triangles.shape[0]):
f.write('f {} {} {}\n'.format(*triangles[t,:]+1))
print('Calculated the isosurface.')
|
Convert the raw data to obj file using PyMCubes.
|
Convert the raw data to obj file using PyMCubes.
|
Python
|
mit
|
AaronJackson/vrn,AaronJackson/vrn,AaronJackson/vrn
|
Convert the raw data to obj file using PyMCubes.
|
import numpy as np
import scipy.misc
import mcubes
import argparse
from sklearn.neighbors import NearestNeighbors
parser = argparse.ArgumentParser(description='Visualise the 3D volume')
parser.add_argument('--image', dest='image',
help="The background image to display")
parser.add_argument('--volume', dest='volume',
help="The volume to render")
parser.add_argument('--obj', dest='obj',
help="The file path of the object")
args = parser.parse_args()
im = scipy.misc.imread(args.image, False, 'RGB')
vol = np.fromfile(args.volume, dtype=np.int8)
vol = vol.reshape((200,192,192))
vol = vol.astype(float)
vertices, triangles = mcubes.marching_cubes(vol, 10)
vertices = vertices[:,(2,1,0)]
vertices[:,2] *= 0.5 # scale the Z component correctly
r = im[:,:,0].flatten()
g = im[:,:,1].flatten()
b = im[:,:,2].flatten()
vcx,vcy = np.meshgrid(np.arange(0,192),np.arange(0,192))
vcx = vcx.flatten()
vcy = vcy.flatten()
vc = np.vstack((vcx, vcy, r, g, b)).transpose()
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(vc[:,:2])
n = neigh.kneighbors(vertices[:,(0,1)], return_distance=False)
colour = vc[n,2:].reshape((vertices.shape[0],3)).astype(float) / 255
vc = np.hstack((vertices, colour))
with open(args.obj, 'w') as f:
for v in range(0,vc.shape[0]):
f.write('v %0.2f %0.2f %0.2f %0.2f %0.2f %0.2f\n' % (vc[v,0],vc[v,1],vc[v,2],vc[v,3],vc[v,4],vc[v,5]))
for t in range(0,triangles.shape[0]):
f.write('f {} {} {}\n'.format(*triangles[t,:]+1))
print('Calculated the isosurface.')
|
<commit_before><commit_msg>Convert the raw data to obj file using PyMCubes.<commit_after>
|
import numpy as np
import scipy.misc
import mcubes
import argparse
from sklearn.neighbors import NearestNeighbors
parser = argparse.ArgumentParser(description='Visualise the 3D volume')
parser.add_argument('--image', dest='image',
help="The background image to display")
parser.add_argument('--volume', dest='volume',
help="The volume to render")
parser.add_argument('--obj', dest='obj',
help="The file path of the object")
args = parser.parse_args()
im = scipy.misc.imread(args.image, False, 'RGB')
vol = np.fromfile(args.volume, dtype=np.int8)
vol = vol.reshape((200,192,192))
vol = vol.astype(float)
vertices, triangles = mcubes.marching_cubes(vol, 10)
vertices = vertices[:,(2,1,0)]
vertices[:,2] *= 0.5 # scale the Z component correctly
r = im[:,:,0].flatten()
g = im[:,:,1].flatten()
b = im[:,:,2].flatten()
vcx,vcy = np.meshgrid(np.arange(0,192),np.arange(0,192))
vcx = vcx.flatten()
vcy = vcy.flatten()
vc = np.vstack((vcx, vcy, r, g, b)).transpose()
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(vc[:,:2])
n = neigh.kneighbors(vertices[:,(0,1)], return_distance=False)
colour = vc[n,2:].reshape((vertices.shape[0],3)).astype(float) / 255
vc = np.hstack((vertices, colour))
with open(args.obj, 'w') as f:
for v in range(0,vc.shape[0]):
f.write('v %0.2f %0.2f %0.2f %0.2f %0.2f %0.2f\n' % (vc[v,0],vc[v,1],vc[v,2],vc[v,3],vc[v,4],vc[v,5]))
for t in range(0,triangles.shape[0]):
f.write('f {} {} {}\n'.format(*triangles[t,:]+1))
print('Calculated the isosurface.')
|
Convert the raw data to obj file using PyMCubes.
import numpy as np
import scipy.misc
import mcubes
import argparse
from sklearn.neighbors import NearestNeighbors
parser = argparse.ArgumentParser(description='Visualise the 3D volume')
parser.add_argument('--image', dest='image',
help="The background image to display")
parser.add_argument('--volume', dest='volume',
help="The volume to render")
parser.add_argument('--obj', dest='obj',
help="The file path of the object")
args = parser.parse_args()
im = scipy.misc.imread(args.image, False, 'RGB')
vol = np.fromfile(args.volume, dtype=np.int8)
vol = vol.reshape((200,192,192))
vol = vol.astype(float)
vertices, triangles = mcubes.marching_cubes(vol, 10)
vertices = vertices[:,(2,1,0)]
vertices[:,2] *= 0.5 # scale the Z component correctly
r = im[:,:,0].flatten()
g = im[:,:,1].flatten()
b = im[:,:,2].flatten()
vcx,vcy = np.meshgrid(np.arange(0,192),np.arange(0,192))
vcx = vcx.flatten()
vcy = vcy.flatten()
vc = np.vstack((vcx, vcy, r, g, b)).transpose()
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(vc[:,:2])
n = neigh.kneighbors(vertices[:,(0,1)], return_distance=False)
colour = vc[n,2:].reshape((vertices.shape[0],3)).astype(float) / 255
vc = np.hstack((vertices, colour))
with open(args.obj, 'w') as f:
for v in range(0,vc.shape[0]):
f.write('v %0.2f %0.2f %0.2f %0.2f %0.2f %0.2f\n' % (vc[v,0],vc[v,1],vc[v,2],vc[v,3],vc[v,4],vc[v,5]))
for t in range(0,triangles.shape[0]):
f.write('f {} {} {}\n'.format(*triangles[t,:]+1))
print('Calculated the isosurface.')
|
<commit_before><commit_msg>Convert the raw data to obj file using PyMCubes.<commit_after>import numpy as np
import scipy.misc
import mcubes
import argparse
from sklearn.neighbors import NearestNeighbors
parser = argparse.ArgumentParser(description='Visualise the 3D volume')
parser.add_argument('--image', dest='image',
help="The background image to display")
parser.add_argument('--volume', dest='volume',
help="The volume to render")
parser.add_argument('--obj', dest='obj',
help="The file path of the object")
args = parser.parse_args()
im = scipy.misc.imread(args.image, False, 'RGB')
vol = np.fromfile(args.volume, dtype=np.int8)
vol = vol.reshape((200,192,192))
vol = vol.astype(float)
vertices, triangles = mcubes.marching_cubes(vol, 10)
vertices = vertices[:,(2,1,0)]
vertices[:,2] *= 0.5 # scale the Z component correctly
r = im[:,:,0].flatten()
g = im[:,:,1].flatten()
b = im[:,:,2].flatten()
vcx,vcy = np.meshgrid(np.arange(0,192),np.arange(0,192))
vcx = vcx.flatten()
vcy = vcy.flatten()
vc = np.vstack((vcx, vcy, r, g, b)).transpose()
neigh = NearestNeighbors(n_neighbors=1)
neigh.fit(vc[:,:2])
n = neigh.kneighbors(vertices[:,(0,1)], return_distance=False)
colour = vc[n,2:].reshape((vertices.shape[0],3)).astype(float) / 255
vc = np.hstack((vertices, colour))
with open(args.obj, 'w') as f:
for v in range(0,vc.shape[0]):
f.write('v %0.2f %0.2f %0.2f %0.2f %0.2f %0.2f\n' % (vc[v,0],vc[v,1],vc[v,2],vc[v,3],vc[v,4],vc[v,5]))
for t in range(0,triangles.shape[0]):
f.write('f {} {} {}\n'.format(*triangles[t,:]+1))
print('Calculated the isosurface.')
|
|
5770dfc5b5df312dc15f0bc44437c0e62936d688
|
events/migrations/0073_soft_delete_replaced_objects.py
|
events/migrations/0073_soft_delete_replaced_objects.py
|
# Generated by Django 2.2.9 on 2020-01-31 08:25
from django.db import migrations
def soft_delete_replaced_objects(Model, deleted_attr='deleted', replaced_by_attr='replaced_by'):
for obj in Model.objects.filter(**{f'{replaced_by_attr}__isnull': False, deleted_attr: False}):
print(f'Found an object that is replaced but not soft deleted: "{obj}". Soft deleting now.')
setattr(obj, deleted_attr, True)
obj.save()
def forwards(apps, schema_editor):
# Begin printing on a new line
print('')
Keyword = apps.get_model('events', 'Keyword')
Place = apps.get_model('events', 'Place')
Event = apps.get_model('events', 'Event')
soft_delete_replaced_objects(Keyword, deleted_attr='deprecated')
soft_delete_replaced_objects(Place)
soft_delete_replaced_objects(Event)
class Migration(migrations.Migration):
dependencies = [
('events', '0072_allow_replaced_by_blank'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
Add data migration that deletes replaced objects
|
Add data migration that deletes replaced objects
|
Python
|
mit
|
City-of-Helsinki/linkedevents,City-of-Helsinki/linkedevents,City-of-Helsinki/linkedevents
|
Add data migration that deletes replaced objects
|
# Generated by Django 2.2.9 on 2020-01-31 08:25
from django.db import migrations
def soft_delete_replaced_objects(Model, deleted_attr='deleted', replaced_by_attr='replaced_by'):
for obj in Model.objects.filter(**{f'{replaced_by_attr}__isnull': False, deleted_attr: False}):
print(f'Found an object that is replaced but not soft deleted: "{obj}". Soft deleting now.')
setattr(obj, deleted_attr, True)
obj.save()
def forwards(apps, schema_editor):
# Begin printing on a new line
print('')
Keyword = apps.get_model('events', 'Keyword')
Place = apps.get_model('events', 'Place')
Event = apps.get_model('events', 'Event')
soft_delete_replaced_objects(Keyword, deleted_attr='deprecated')
soft_delete_replaced_objects(Place)
soft_delete_replaced_objects(Event)
class Migration(migrations.Migration):
dependencies = [
('events', '0072_allow_replaced_by_blank'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Add data migration that deletes replaced objects<commit_after>
|
# Generated by Django 2.2.9 on 2020-01-31 08:25
from django.db import migrations
def soft_delete_replaced_objects(Model, deleted_attr='deleted', replaced_by_attr='replaced_by'):
for obj in Model.objects.filter(**{f'{replaced_by_attr}__isnull': False, deleted_attr: False}):
print(f'Found an object that is replaced but not soft deleted: "{obj}". Soft deleting now.')
setattr(obj, deleted_attr, True)
obj.save()
def forwards(apps, schema_editor):
# Begin printing on a new line
print('')
Keyword = apps.get_model('events', 'Keyword')
Place = apps.get_model('events', 'Place')
Event = apps.get_model('events', 'Event')
soft_delete_replaced_objects(Keyword, deleted_attr='deprecated')
soft_delete_replaced_objects(Place)
soft_delete_replaced_objects(Event)
class Migration(migrations.Migration):
dependencies = [
('events', '0072_allow_replaced_by_blank'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
Add data migration that deletes replaced objects
# Generated by Django 2.2.9 on 2020-01-31 08:25
from django.db import migrations
def soft_delete_replaced_objects(Model, deleted_attr='deleted', replaced_by_attr='replaced_by'):
for obj in Model.objects.filter(**{f'{replaced_by_attr}__isnull': False, deleted_attr: False}):
print(f'Found an object that is replaced but not soft deleted: "{obj}". Soft deleting now.')
setattr(obj, deleted_attr, True)
obj.save()
def forwards(apps, schema_editor):
# Begin printing on a new line
print('')
Keyword = apps.get_model('events', 'Keyword')
Place = apps.get_model('events', 'Place')
Event = apps.get_model('events', 'Event')
soft_delete_replaced_objects(Keyword, deleted_attr='deprecated')
soft_delete_replaced_objects(Place)
soft_delete_replaced_objects(Event)
class Migration(migrations.Migration):
dependencies = [
('events', '0072_allow_replaced_by_blank'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Add data migration that deletes replaced objects<commit_after># Generated by Django 2.2.9 on 2020-01-31 08:25
from django.db import migrations
def soft_delete_replaced_objects(Model, deleted_attr='deleted', replaced_by_attr='replaced_by'):
for obj in Model.objects.filter(**{f'{replaced_by_attr}__isnull': False, deleted_attr: False}):
print(f'Found an object that is replaced but not soft deleted: "{obj}". Soft deleting now.')
setattr(obj, deleted_attr, True)
obj.save()
def forwards(apps, schema_editor):
# Begin printing on a new line
print('')
Keyword = apps.get_model('events', 'Keyword')
Place = apps.get_model('events', 'Place')
Event = apps.get_model('events', 'Event')
soft_delete_replaced_objects(Keyword, deleted_attr='deprecated')
soft_delete_replaced_objects(Place)
soft_delete_replaced_objects(Event)
class Migration(migrations.Migration):
dependencies = [
('events', '0072_allow_replaced_by_blank'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
|
d08404d9589a0a61aba9eeb01231f5d7d6707bf3
|
services/migrations/0026_auto_20150302_2222.py
|
services/migrations/0026_auto_20150302_2222.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import services.models
class Migration(migrations.Migration):
dependencies = [
('services', '0025_merge'),
]
operations = [
migrations.AlterField(
model_name='provider',
name='name_ar',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in Arabic'),
preserve_default=True,
),
migrations.AlterField(
model_name='provider',
name='name_en',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in English'),
preserve_default=True,
),
migrations.AlterField(
model_name='provider',
name='name_fr',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in French'),
preserve_default=True,
),
]
|
Add missing migration for provider name validators
|
Add missing migration for provider name validators
|
Python
|
bsd-3-clause
|
theirc/ServiceInfo,theirc/ServiceInfo,theirc/ServiceInfo,theirc/ServiceInfo-ircdeploy,theirc/ServiceInfo
|
Add missing migration for provider name validators
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import services.models
class Migration(migrations.Migration):
dependencies = [
('services', '0025_merge'),
]
operations = [
migrations.AlterField(
model_name='provider',
name='name_ar',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in Arabic'),
preserve_default=True,
),
migrations.AlterField(
model_name='provider',
name='name_en',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in English'),
preserve_default=True,
),
migrations.AlterField(
model_name='provider',
name='name_fr',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in French'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add missing migration for provider name validators<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import services.models
class Migration(migrations.Migration):
dependencies = [
('services', '0025_merge'),
]
operations = [
migrations.AlterField(
model_name='provider',
name='name_ar',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in Arabic'),
preserve_default=True,
),
migrations.AlterField(
model_name='provider',
name='name_en',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in English'),
preserve_default=True,
),
migrations.AlterField(
model_name='provider',
name='name_fr',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in French'),
preserve_default=True,
),
]
|
Add missing migration for provider name validators
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import services.models
class Migration(migrations.Migration):
dependencies = [
('services', '0025_merge'),
]
operations = [
migrations.AlterField(
model_name='provider',
name='name_ar',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in Arabic'),
preserve_default=True,
),
migrations.AlterField(
model_name='provider',
name='name_en',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in English'),
preserve_default=True,
),
migrations.AlterField(
model_name='provider',
name='name_fr',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in French'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add missing migration for provider name validators<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import services.models
class Migration(migrations.Migration):
dependencies = [
('services', '0025_merge'),
]
operations = [
migrations.AlterField(
model_name='provider',
name='name_ar',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in Arabic'),
preserve_default=True,
),
migrations.AlterField(
model_name='provider',
name='name_en',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in English'),
preserve_default=True,
),
migrations.AlterField(
model_name='provider',
name='name_fr',
field=models.CharField(default='', validators=[services.models.blank_or_at_least_one_letter], max_length=256, blank=True, verbose_name='name in French'),
preserve_default=True,
),
]
|
|
f453221b89d21dc7350706fc707bb3a88012d5ba
|
src/simple_data_export.py
|
src/simple_data_export.py
|
from collections import defaultdict
import json
from math import floor
import sys
import database as db
from database.model import Team
from geotools import simple_distance
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
if len(sys.argv) == 2:
MAX_TEAMS = sys.argv[1]
else:
MAX_TEAMS = 9
print "init db..."
db.init_session(connection_string=DB_CONNECTION)
print "fetch teams..."
teams = db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True).order_by(Team.id).limit(MAX_TEAMS).all()
data = []
round_teams = defaultdict(list)
max_working = len(teams) - (len(teams) % 3)
divider = max_working / 3.0
def distance_sort(a, b):
if a.location.center_distance > b.location.center_distance:
return -1
if a.location.center_distance < b.location.center_distance:
return 1
return 0
working = teams[:max_working]
teams = sorted(working, distance_sort) + teams[max_working:]
for idx, team in enumerate(teams):
round_idx = 0
if (divider > 0):
round_idx = min(int(floor(idx / divider)), 3)
team_data = {"name": team.name,
"id": team.id,
"location": {"lat": team.location.lat,
"lon": team.location.lon},
"round_host": round_idx}
round_teams[round_idx].append(team)
data.append(team_data)
print "write team data..."
with open("teams.json", "w+") as f:
json.dump(data, f)
def get_round_distances(from_teams, to_teams):
distances = defaultdict(dict)
global team_from, location_from, team_to, location_to, distance
for team_from in from_teams:
location_from = MapPoint.from_team(team_from)
for team_to in to_teams:
location_to = MapPoint.from_team(team_to)
distances[team_from.id][team_to.id] = simple_distance(location_from, location_to)
distance_data = []
print "get round 1 to 2 routes..."
distance_data.append(get_round_distances(round_teams[0], round_teams[1]))
print "get round 2 to 3 routes..."
distance_data.append(get_round_distances(round_teams[1], round_teams[2]))
print "write distance data..."
with open("distances.json", "w+") as f:
json.dump(distance_data, f)
|
Add a very simple script to export team data from the database as json files.
|
Add a very simple script to export team data from the database as json files.
|
Python
|
bsd-3-clause
|
eXma/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system
|
Add a very simple script to export team data from the database as json files.
|
from collections import defaultdict
import json
from math import floor
import sys
import database as db
from database.model import Team
from geotools import simple_distance
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
if len(sys.argv) == 2:
MAX_TEAMS = sys.argv[1]
else:
MAX_TEAMS = 9
print "init db..."
db.init_session(connection_string=DB_CONNECTION)
print "fetch teams..."
teams = db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True).order_by(Team.id).limit(MAX_TEAMS).all()
data = []
round_teams = defaultdict(list)
max_working = len(teams) - (len(teams) % 3)
divider = max_working / 3.0
def distance_sort(a, b):
if a.location.center_distance > b.location.center_distance:
return -1
if a.location.center_distance < b.location.center_distance:
return 1
return 0
working = teams[:max_working]
teams = sorted(working, distance_sort) + teams[max_working:]
for idx, team in enumerate(teams):
round_idx = 0
if (divider > 0):
round_idx = min(int(floor(idx / divider)), 3)
team_data = {"name": team.name,
"id": team.id,
"location": {"lat": team.location.lat,
"lon": team.location.lon},
"round_host": round_idx}
round_teams[round_idx].append(team)
data.append(team_data)
print "write team data..."
with open("teams.json", "w+") as f:
json.dump(data, f)
def get_round_distances(from_teams, to_teams):
distances = defaultdict(dict)
global team_from, location_from, team_to, location_to, distance
for team_from in from_teams:
location_from = MapPoint.from_team(team_from)
for team_to in to_teams:
location_to = MapPoint.from_team(team_to)
distances[team_from.id][team_to.id] = simple_distance(location_from, location_to)
distance_data = []
print "get round 1 to 2 routes..."
distance_data.append(get_round_distances(round_teams[0], round_teams[1]))
print "get round 2 to 3 routes..."
distance_data.append(get_round_distances(round_teams[1], round_teams[2]))
print "write distance data..."
with open("distances.json", "w+") as f:
json.dump(distance_data, f)
|
<commit_before><commit_msg>Add a very simple script to export team data from the database as json files.<commit_after>
|
from collections import defaultdict
import json
from math import floor
import sys
import database as db
from database.model import Team
from geotools import simple_distance
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
if len(sys.argv) == 2:
MAX_TEAMS = sys.argv[1]
else:
MAX_TEAMS = 9
print "init db..."
db.init_session(connection_string=DB_CONNECTION)
print "fetch teams..."
teams = db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True).order_by(Team.id).limit(MAX_TEAMS).all()
data = []
round_teams = defaultdict(list)
max_working = len(teams) - (len(teams) % 3)
divider = max_working / 3.0
def distance_sort(a, b):
if a.location.center_distance > b.location.center_distance:
return -1
if a.location.center_distance < b.location.center_distance:
return 1
return 0
working = teams[:max_working]
teams = sorted(working, distance_sort) + teams[max_working:]
for idx, team in enumerate(teams):
round_idx = 0
if (divider > 0):
round_idx = min(int(floor(idx / divider)), 3)
team_data = {"name": team.name,
"id": team.id,
"location": {"lat": team.location.lat,
"lon": team.location.lon},
"round_host": round_idx}
round_teams[round_idx].append(team)
data.append(team_data)
print "write team data..."
with open("teams.json", "w+") as f:
json.dump(data, f)
def get_round_distances(from_teams, to_teams):
distances = defaultdict(dict)
global team_from, location_from, team_to, location_to, distance
for team_from in from_teams:
location_from = MapPoint.from_team(team_from)
for team_to in to_teams:
location_to = MapPoint.from_team(team_to)
distances[team_from.id][team_to.id] = simple_distance(location_from, location_to)
distance_data = []
print "get round 1 to 2 routes..."
distance_data.append(get_round_distances(round_teams[0], round_teams[1]))
print "get round 2 to 3 routes..."
distance_data.append(get_round_distances(round_teams[1], round_teams[2]))
print "write distance data..."
with open("distances.json", "w+") as f:
json.dump(distance_data, f)
|
Add a very simple script to export team data from the database as json files.
from collections import defaultdict
import json
from math import floor
import sys
import database as db
from database.model import Team
from geotools import simple_distance
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
if len(sys.argv) == 2:
MAX_TEAMS = sys.argv[1]
else:
MAX_TEAMS = 9
print "init db..."
db.init_session(connection_string=DB_CONNECTION)
print "fetch teams..."
teams = db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True).order_by(Team.id).limit(MAX_TEAMS).all()
data = []
round_teams = defaultdict(list)
max_working = len(teams) - (len(teams) % 3)
divider = max_working / 3.0
def distance_sort(a, b):
if a.location.center_distance > b.location.center_distance:
return -1
if a.location.center_distance < b.location.center_distance:
return 1
return 0
working = teams[:max_working]
teams = sorted(working, distance_sort) + teams[max_working:]
for idx, team in enumerate(teams):
round_idx = 0
if (divider > 0):
round_idx = min(int(floor(idx / divider)), 3)
team_data = {"name": team.name,
"id": team.id,
"location": {"lat": team.location.lat,
"lon": team.location.lon},
"round_host": round_idx}
round_teams[round_idx].append(team)
data.append(team_data)
print "write team data..."
with open("teams.json", "w+") as f:
json.dump(data, f)
def get_round_distances(from_teams, to_teams):
distances = defaultdict(dict)
global team_from, location_from, team_to, location_to, distance
for team_from in from_teams:
location_from = MapPoint.from_team(team_from)
for team_to in to_teams:
location_to = MapPoint.from_team(team_to)
            distances[team_from.id][team_to.id] = simple_distance(location_from, location_to)
    return distances
distance_data = []
print "get round 1 to 2 routes..."
distance_data.append(get_round_distances(round_teams[0], round_teams[1]))
print "get round 2 to 3 routes..."
distance_data.append(get_round_distances(round_teams[1], round_teams[2]))
print "write distance data..."
with open("distances.json", "w+") as f:
json.dump(distance_data, f)
|
<commit_before><commit_msg>Add a very simple script to export team data from the database as json files.<commit_after>from collections import defaultdict
import json
from math import floor
import sys
import database as db
from database.model import Team
from geotools import simple_distance
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION
if len(sys.argv) == 2:
MAX_TEAMS = sys.argv[1]
else:
MAX_TEAMS = 9
print "init db..."
db.init_session(connection_string=DB_CONNECTION)
print "fetch teams..."
teams = db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True).order_by(Team.id).limit(MAX_TEAMS).all()
data = []
round_teams = defaultdict(list)
max_working = len(teams) - (len(teams) % 3)
divider = max_working / 3.0
def distance_sort(a, b):
if a.location.center_distance > b.location.center_distance:
return -1
if a.location.center_distance < b.location.center_distance:
return 1
return 0
working = teams[:max_working]
teams = sorted(working, distance_sort) + teams[max_working:]
for idx, team in enumerate(teams):
round_idx = 0
if (divider > 0):
round_idx = min(int(floor(idx / divider)), 3)
team_data = {"name": team.name,
"id": team.id,
"location": {"lat": team.location.lat,
"lon": team.location.lon},
"round_host": round_idx}
round_teams[round_idx].append(team)
data.append(team_data)
print "write team data..."
with open("teams.json", "w+") as f:
json.dump(data, f)
def get_round_distances(from_teams, to_teams):
distances = defaultdict(dict)
global team_from, location_from, team_to, location_to, distance
for team_from in from_teams:
location_from = MapPoint.from_team(team_from)
for team_to in to_teams:
location_to = MapPoint.from_team(team_to)
            distances[team_from.id][team_to.id] = simple_distance(location_from, location_to)
    return distances
distance_data = []
print "get round 1 to 2 routes..."
distance_data.append(get_round_distances(round_teams[0], round_teams[1]))
print "get round 2 to 3 routes..."
distance_data.append(get_round_distances(round_teams[1], round_teams[2]))
print "write distance data..."
with open("distances.json", "w+") as f:
json.dump(distance_data, f)
|
|
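A minimal standalone sketch of the idx-to-round bucketing used in the export script above; the nine integer ids are a made-up stand-in for Team objects, and only the formula is taken from the script.
from math import floor

team_ids = list(range(9))                           # stand-in for 9 confirmed teams
max_working = len(team_ids) - (len(team_ids) % 3)   # 9
divider = max_working / 3.0                         # 3.0
buckets = {}
for idx in team_ids:
    round_idx = min(int(floor(idx / divider)), 3)   # same formula as in the script
    buckets.setdefault(round_idx, []).append(idx)
print(buckets)  # {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8]}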
b5a9c8e0c7ae377d9b1a3671170542f6a7fecde3
|
ecommerce/extensions/catalogue/migrations/0034_add_on_campus_coupon_category.py
|
ecommerce/extensions/catalogue/migrations/0034_add_on_campus_coupon_category.py
|
""" Add 'On-Campus Learners' to the list of default coupon categories"""
from django.db import migrations
from oscar.apps.catalogue.categories import create_from_breadcrumbs
from oscar.core.loading import get_model
Category = get_model('catalogue', 'Category')
COUPON_CATEGORY_NAME = 'Coupons'
ON_CAMPUS_CATEGORY = 'On-Campus Learners'
def create_on_campus_category(apps, schema_editor):
""" Create on-campus coupon category """
create_from_breadcrumbs('{} > {}'.format(COUPON_CATEGORY_NAME, ON_CAMPUS_CATEGORY))
def remove_on_campus_category(apps, schema_editor):
""" Remove on-campus coupon category """
Category.objects.get(
name=COUPON_CATEGORY_NAME
).get_children().filter(
name=ON_CAMPUS_CATEGORY
).delete()
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0033_add_coupon_categories')
]
operations = [
migrations.RunPython(create_on_campus_category, remove_on_campus_category)
]
|
Create On-Campus Learner Coupon Category
|
REVE-183: Create On-Campus Learner Coupon Category
|
Python
|
agpl-3.0
|
edx/ecommerce,eduNEXT/edunext-ecommerce,eduNEXT/edunext-ecommerce,eduNEXT/edunext-ecommerce,edx/ecommerce,edx/ecommerce,edx/ecommerce,eduNEXT/edunext-ecommerce
|
REVE-183: Create On-Campus Learner Coupon Category
|
""" Add 'On-Campus Learners' to the list of default coupon categories"""
from django.db import migrations
from oscar.apps.catalogue.categories import create_from_breadcrumbs
from oscar.core.loading import get_model
Category = get_model('catalogue', 'Category')
COUPON_CATEGORY_NAME = 'Coupons'
ON_CAMPUS_CATEGORY = 'On-Campus Learners'
def create_on_campus_category(apps, schema_editor):
""" Create on-campus coupon category """
create_from_breadcrumbs('{} > {}'.format(COUPON_CATEGORY_NAME, ON_CAMPUS_CATEGORY))
def remove_on_campus_category(apps, schema_editor):
""" Remove on-campus coupon category """
Category.objects.get(
name=COUPON_CATEGORY_NAME
).get_children().filter(
name=ON_CAMPUS_CATEGORY
).delete()
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0033_add_coupon_categories')
]
operations = [
migrations.RunPython(create_on_campus_category, remove_on_campus_category)
]
|
<commit_before><commit_msg>REVE-183: Create On-Campus Learner Coupon Category<commit_after>
|
""" Add 'On-Campus Learners' to the list of default coupon categories"""
from django.db import migrations
from oscar.apps.catalogue.categories import create_from_breadcrumbs
from oscar.core.loading import get_model
Category = get_model('catalogue', 'Category')
COUPON_CATEGORY_NAME = 'Coupons'
ON_CAMPUS_CATEGORY = 'On-Campus Learners'
def create_on_campus_category(apps, schema_editor):
""" Create on-campus coupon category """
create_from_breadcrumbs('{} > {}'.format(COUPON_CATEGORY_NAME, ON_CAMPUS_CATEGORY))
def remove_on_campus_category(apps, schema_editor):
""" Remove on-campus coupon category """
Category.objects.get(
name=COUPON_CATEGORY_NAME
).get_children().filter(
name=ON_CAMPUS_CATEGORY
).delete()
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0033_add_coupon_categories')
]
operations = [
migrations.RunPython(create_on_campus_category, remove_on_campus_category)
]
|
REVE-183: Create On-Campus Learner Coupon Category""" Add 'On-Campus Learners' to the list of default coupon categories"""
from django.db import migrations
from oscar.apps.catalogue.categories import create_from_breadcrumbs
from oscar.core.loading import get_model
Category = get_model('catalogue', 'Category')
COUPON_CATEGORY_NAME = 'Coupons'
ON_CAMPUS_CATEGORY = 'On-Campus Learners'
def create_on_campus_category(apps, schema_editor):
""" Create on-campus coupon category """
create_from_breadcrumbs('{} > {}'.format(COUPON_CATEGORY_NAME, ON_CAMPUS_CATEGORY))
def remove_on_campus_category(apps, schema_editor):
""" Remove on-campus coupon category """
Category.objects.get(
name=COUPON_CATEGORY_NAME
).get_children().filter(
name=ON_CAMPUS_CATEGORY
).delete()
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0033_add_coupon_categories')
]
operations = [
migrations.RunPython(create_on_campus_category, remove_on_campus_category)
]
|
<commit_before><commit_msg>REVE-183: Create On-Campus Learner Coupon Category<commit_after>""" Add 'On-Campus Learners' to the list of default coupon categories"""
from django.db import migrations
from oscar.apps.catalogue.categories import create_from_breadcrumbs
from oscar.core.loading import get_model
Category = get_model('catalogue', 'Category')
COUPON_CATEGORY_NAME = 'Coupons'
ON_CAMPUS_CATEGORY = 'On-Campus Learners'
def create_on_campus_category(apps, schema_editor):
""" Create on-campus coupon category """
create_from_breadcrumbs('{} > {}'.format(COUPON_CATEGORY_NAME, ON_CAMPUS_CATEGORY))
def remove_on_campus_category(apps, schema_editor):
""" Remove on-campus coupon category """
Category.objects.get(
name=COUPON_CATEGORY_NAME
).get_children().filter(
name=ON_CAMPUS_CATEGORY
).delete()
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0033_add_coupon_categories')
]
operations = [
migrations.RunPython(create_on_campus_category, remove_on_campus_category)
]
|
|
9569ea1d7c3e91dc8f7ba63ca22aaddd2f3bcfca
|
djangae/contrib/auth/management/__init__.py
|
djangae/contrib/auth/management/__init__.py
|
from django.db import DEFAULT_DB_ALIAS, router
from django.db.models import get_model, get_models, signals, UnavailableApp
from django.contrib.auth import (models as auth_app, get_permission_codename,
get_user_model)
from django.contrib.auth.management import _get_all_permissions
def create_permissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
try:
get_model('auth', 'Permission')
except UnavailableApp:
return
if not router.allow_syncdb(db, auth_app.Permission):
return
from django.contrib.contenttypes.models import ContentType
app_models = get_models(app)
# This will hold the permissions we're looking for as
# (content_type, (codename, name))
searched_perms = list()
# The codenames and ctypes that should exist.
ctypes = set()
for klass in app_models:
# Force looking up the content types in the current database
# before creating foreign keys to them.
ctype = ContentType.objects.db_manager(db).get_for_model(klass)
ctypes.add(ctype)
for perm in _get_all_permissions(klass._meta, ctype):
searched_perms.append((ctype, perm))
# Find all the Permissions that have a content_type for a model we're
# looking for. We don't need to check for codenames since we already have
# a list of the ones we're going to create.
ctypes_to_get = list(ctypes)
all_perms = []
while ctypes_to_get:
all_perms.extend(list(auth_app.Permission.objects.using(db).filter(
content_type__in=ctypes_to_get[:30],
).values_list(
"content_type", "codename"
)))
ctypes_to_get = ctypes_to_get[30:]
ctypes_to_get = set(ctypes_to_get)
perms = [
auth_app.Permission(codename=codename, name=name, content_type=ctype)
for ctype, (codename, name) in searched_perms
if (ctype.pk, codename) not in all_perms
]
auth_app.Permission.objects.using(db).bulk_create(perms)
if verbosity >= 2:
for perm in perms:
print("Adding permission '%s'" % perm)
#Disconnect the default create_permissions handler which tries to do too many IN queries in some cases
signals.post_syncdb.disconnect(dispatch_uid="django.contrib.auth.management.create_permissions")
#Connect our one in its place
signals.post_syncdb.connect(create_permissions,
dispatch_uid="djangae.contrib.auth.management.create_permissions")
|
Add an override for create_permissions which batches the lookups
|
Add an override for create_permissions which batches the lookups
|
Python
|
bsd-3-clause
|
stucox/djangae,nealedj/djangae,SiPiggles/djangae,b-cannon/my_djae,armirusco/djangae,armirusco/djangae,martinogden/djangae,stucox/djangae,martinogden/djangae,kirberich/djangae,grzes/djangae,grzes/djangae,leekchan/djangae,potatolondon/djangae,martinogden/djangae,chargrizzle/djangae,chargrizzle/djangae,asendecka/djangae,SiPiggles/djangae,armirusco/djangae,jscissr/djangae,wangjun/djangae,jscissr/djangae,asendecka/djangae,leekchan/djangae,wangjun/djangae,trik/djangae,trik/djangae,jscissr/djangae,grzes/djangae,nealedj/djangae,chargrizzle/djangae,wangjun/djangae,pablorecio/djangae,SiPiggles/djangae,kirberich/djangae,potatolondon/djangae,asendecka/djangae,pablorecio/djangae,pablorecio/djangae,leekchan/djangae,kirberich/djangae,trik/djangae,stucox/djangae,nealedj/djangae
|
Add an override for create_permissions which batches the lookups
|
from django.db import DEFAULT_DB_ALIAS, router
from django.db.models import get_model, get_models, signals, UnavailableApp
from django.contrib.auth import (models as auth_app, get_permission_codename,
get_user_model)
from django.contrib.auth.management import _get_all_permissions
def create_permissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
try:
get_model('auth', 'Permission')
except UnavailableApp:
return
if not router.allow_syncdb(db, auth_app.Permission):
return
from django.contrib.contenttypes.models import ContentType
app_models = get_models(app)
# This will hold the permissions we're looking for as
# (content_type, (codename, name))
searched_perms = list()
# The codenames and ctypes that should exist.
ctypes = set()
for klass in app_models:
# Force looking up the content types in the current database
# before creating foreign keys to them.
ctype = ContentType.objects.db_manager(db).get_for_model(klass)
ctypes.add(ctype)
for perm in _get_all_permissions(klass._meta, ctype):
searched_perms.append((ctype, perm))
# Find all the Permissions that have a content_type for a model we're
# looking for. We don't need to check for codenames since we already have
# a list of the ones we're going to create.
ctypes_to_get = list(ctypes)
all_perms = []
while ctypes_to_get:
all_perms.extend(list(auth_app.Permission.objects.using(db).filter(
content_type__in=ctypes_to_get[:30],
).values_list(
"content_type", "codename"
)))
ctypes_to_get = ctypes_to_get[30:]
ctypes_to_get = set(ctypes_to_get)
perms = [
auth_app.Permission(codename=codename, name=name, content_type=ctype)
for ctype, (codename, name) in searched_perms
if (ctype.pk, codename) not in all_perms
]
auth_app.Permission.objects.using(db).bulk_create(perms)
if verbosity >= 2:
for perm in perms:
print("Adding permission '%s'" % perm)
#Disconnect the default create_permissions handler which tries to do too many IN queries in some cases
signals.post_syncdb.disconnect(dispatch_uid="django.contrib.auth.management.create_permissions")
#Connect our one in its place
signals.post_syncdb.connect(create_permissions,
dispatch_uid="djangae.contrib.auth.management.create_permissions")
|
<commit_before><commit_msg>Add an override for create_permissions which batches the lookups<commit_after>
|
from django.db import DEFAULT_DB_ALIAS, router
from django.db.models import get_model, get_models, signals, UnavailableApp
from django.contrib.auth import (models as auth_app, get_permission_codename,
get_user_model)
from django.contrib.auth.management import _get_all_permissions
def create_permissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
try:
get_model('auth', 'Permission')
except UnavailableApp:
return
if not router.allow_syncdb(db, auth_app.Permission):
return
from django.contrib.contenttypes.models import ContentType
app_models = get_models(app)
# This will hold the permissions we're looking for as
# (content_type, (codename, name))
searched_perms = list()
# The codenames and ctypes that should exist.
ctypes = set()
for klass in app_models:
# Force looking up the content types in the current database
# before creating foreign keys to them.
ctype = ContentType.objects.db_manager(db).get_for_model(klass)
ctypes.add(ctype)
for perm in _get_all_permissions(klass._meta, ctype):
searched_perms.append((ctype, perm))
# Find all the Permissions that have a content_type for a model we're
# looking for. We don't need to check for codenames since we already have
# a list of the ones we're going to create.
ctypes_to_get = list(ctypes)
all_perms = []
while ctypes_to_get:
all_perms.extend(list(auth_app.Permission.objects.using(db).filter(
content_type__in=ctypes_to_get[:30],
).values_list(
"content_type", "codename"
)))
ctypes_to_get = ctypes_to_get[30:]
ctypes_to_get = set(ctypes_to_get)
perms = [
auth_app.Permission(codename=codename, name=name, content_type=ctype)
for ctype, (codename, name) in searched_perms
if (ctype.pk, codename) not in all_perms
]
auth_app.Permission.objects.using(db).bulk_create(perms)
if verbosity >= 2:
for perm in perms:
print("Adding permission '%s'" % perm)
#Disconnect the default create_permissions handler which tries to do too many IN queries in some cases
signals.post_syncdb.disconnect(dispatch_uid="django.contrib.auth.management.create_permissions")
#Connect our one in its place
signals.post_syncdb.connect(create_permissions,
dispatch_uid="djangae.contrib.auth.management.create_permissions")
|
Add an override for create_permissions which batches the lookupsfrom django.db import DEFAULT_DB_ALIAS, router
from django.db.models import get_model, get_models, signals, UnavailableApp
from django.contrib.auth import (models as auth_app, get_permission_codename,
get_user_model)
from django.contrib.auth.management import _get_all_permissions
def create_permissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
try:
get_model('auth', 'Permission')
except UnavailableApp:
return
if not router.allow_syncdb(db, auth_app.Permission):
return
from django.contrib.contenttypes.models import ContentType
app_models = get_models(app)
# This will hold the permissions we're looking for as
# (content_type, (codename, name))
searched_perms = list()
# The codenames and ctypes that should exist.
ctypes = set()
for klass in app_models:
# Force looking up the content types in the current database
# before creating foreign keys to them.
ctype = ContentType.objects.db_manager(db).get_for_model(klass)
ctypes.add(ctype)
for perm in _get_all_permissions(klass._meta, ctype):
searched_perms.append((ctype, perm))
# Find all the Permissions that have a content_type for a model we're
# looking for. We don't need to check for codenames since we already have
# a list of the ones we're going to create.
ctypes_to_get = list(ctypes)
all_perms = []
while ctypes_to_get:
all_perms.extend(list(auth_app.Permission.objects.using(db).filter(
content_type__in=ctypes_to_get[:30],
).values_list(
"content_type", "codename"
)))
ctypes_to_get = ctypes_to_get[30:]
ctypes_to_get = set(ctypes_to_get)
perms = [
auth_app.Permission(codename=codename, name=name, content_type=ctype)
for ctype, (codename, name) in searched_perms
if (ctype.pk, codename) not in all_perms
]
auth_app.Permission.objects.using(db).bulk_create(perms)
if verbosity >= 2:
for perm in perms:
print("Adding permission '%s'" % perm)
#Disconnect the default create_permissions handler which tries to do too many IN queries in some cases
signals.post_syncdb.disconnect(dispatch_uid="django.contrib.auth.management.create_permissions")
#Connect our one in its place
signals.post_syncdb.connect(create_permissions,
dispatch_uid="djangae.contrib.auth.management.create_permissions")
|
<commit_before><commit_msg>Add an override for create_permissions which batches the lookups<commit_after>from django.db import DEFAULT_DB_ALIAS, router
from django.db.models import get_model, get_models, signals, UnavailableApp
from django.contrib.auth import (models as auth_app, get_permission_codename,
get_user_model)
from django.contrib.auth.management import _get_all_permissions
def create_permissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
try:
get_model('auth', 'Permission')
except UnavailableApp:
return
if not router.allow_syncdb(db, auth_app.Permission):
return
from django.contrib.contenttypes.models import ContentType
app_models = get_models(app)
# This will hold the permissions we're looking for as
# (content_type, (codename, name))
searched_perms = list()
# The codenames and ctypes that should exist.
ctypes = set()
for klass in app_models:
# Force looking up the content types in the current database
# before creating foreign keys to them.
ctype = ContentType.objects.db_manager(db).get_for_model(klass)
ctypes.add(ctype)
for perm in _get_all_permissions(klass._meta, ctype):
searched_perms.append((ctype, perm))
# Find all the Permissions that have a content_type for a model we're
# looking for. We don't need to check for codenames since we already have
# a list of the ones we're going to create.
ctypes_to_get = list(ctypes)
all_perms = []
while ctypes_to_get:
all_perms.extend(list(auth_app.Permission.objects.using(db).filter(
content_type__in=ctypes_to_get[:30],
).values_list(
"content_type", "codename"
)))
ctypes_to_get = ctypes_to_get[30:]
ctypes_to_get = set(ctypes_to_get)
perms = [
auth_app.Permission(codename=codename, name=name, content_type=ctype)
for ctype, (codename, name) in searched_perms
if (ctype.pk, codename) not in all_perms
]
auth_app.Permission.objects.using(db).bulk_create(perms)
if verbosity >= 2:
for perm in perms:
print("Adding permission '%s'" % perm)
#Disconnect the default create_permissions handler which tries to do too many IN queries in some cases
signals.post_syncdb.disconnect(dispatch_uid="django.contrib.auth.management.create_permissions")
#Connect our one in its place
signals.post_syncdb.connect(create_permissions,
dispatch_uid="djangae.contrib.auth.management.create_permissions")
|
|
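The core of the override above is slicing the content-type list into batches of 30 so that no single IN clause grows too large; a generic, framework-free sketch of that chunking pattern (names here are illustrative, not Django API):
def chunked(items, size=30):
    # yield successive slices of at most `size` elements
    for start in range(0, len(items), size):
        yield items[start:start + size]

ids = list(range(100))
batches = list(chunked(ids))
assert len(batches) == 4                      # 30 + 30 + 30 + 10
assert sum(len(b) for b in batches) == 100    # nothing lost, nothing duplicated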
5c2ed17e84f435380ded64e0b5d1703f8e3b54c9
|
csunplugged/tests/infrastructure/test_deployment.py
|
csunplugged/tests/infrastructure/test_deployment.py
|
"""Tests deployments."""
import os
import re
from tests.BaseTest import BaseTest
class DeploymentTest(BaseTest):
"""Tests for deployments."""
def test_dev_deploy_scripts_are_called(self):
# Create list of script files
FILES_TO_SKIP = (
'decrypt-dev-secrets.sh',
'load-dev-deploy-config-envs.sh',
)
filenames = set()
path = "../infrastructure/dev-deploy/"
for filename in sorted(os.listdir(path)):
if filename.endswith('.sh') and filename not in FILES_TO_SKIP:
filenames.add(filename)
with open("../.travis.yml", "r") as f:
deployment_contents = f.read()
regex = re.compile(r'bash \./infrastructure/dev-deploy/(.*)$', flags=re.MULTILINE)
results = re.findall(regex, deployment_contents)
for called_filename in results:
filenames.remove(called_filename)
# Check if any files are missed
error_text = ''
for filename in filenames:
error_text += f"\n'{filename}' is not called during dev deployment"
if error_text:
raise Exception(error_text)
def test_prod_deploy_scripts_are_called(self):
# Create list of script files
FILES_TO_SKIP = (
'decrypt-prod-secrets.sh',
'load-prod-deploy-config-envs.sh',
)
filenames = set()
path = "../infrastructure/prod-deploy/"
for filename in sorted(os.listdir(path)):
if filename.endswith('.sh') and filename not in FILES_TO_SKIP:
filenames.add(filename)
with open("../.travis.yml", "r") as f:
deployment_contents = f.read()
regex = re.compile(r'bash \./infrastructure/prod-deploy/(.*)$', flags=re.MULTILINE)
results = re.findall(regex, deployment_contents)
for called_filename in results:
filenames.remove(called_filename)
# Check if any files are missed
error_text = ''
for filename in filenames:
error_text += f"\n'{filename}' is not called during prod deployment"
if error_text:
raise Exception(error_text)
|
Add test to check resource generation files are called
|
Add test to check resource generation files are called
|
Python
|
mit
|
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
|
Add test to check resource generation files are called
|
"""Tests deployments."""
import os
import re
from tests.BaseTest import BaseTest
class DeploymentTest(BaseTest):
"""Tests for deployments."""
def test_dev_deploy_scripts_are_called(self):
# Create list of script files
FILES_TO_SKIP = (
'decrypt-dev-secrets.sh',
'load-dev-deploy-config-envs.sh',
)
filenames = set()
path = "../infrastructure/dev-deploy/"
for filename in sorted(os.listdir(path)):
if filename.endswith('.sh') and filename not in FILES_TO_SKIP:
filenames.add(filename)
with open("../.travis.yml", "r") as f:
deployment_contents = f.read()
regex = re.compile(r'bash \./infrastructure/dev-deploy/(.*)$', flags=re.MULTILINE)
results = re.findall(regex, deployment_contents)
for called_filename in results:
filenames.remove(called_filename)
# Check if any files are missed
error_text = ''
for filename in filenames:
error_text += f"\n'{filename}' is not called during dev deployment"
if error_text:
raise Exception(error_text)
def test_prod_deploy_scripts_are_called(self):
# Create list of script files
FILES_TO_SKIP = (
'decrypt-prod-secrets.sh',
'load-prod-deploy-config-envs.sh',
)
filenames = set()
path = "../infrastructure/prod-deploy/"
for filename in sorted(os.listdir(path)):
if filename.endswith('.sh') and filename not in FILES_TO_SKIP:
filenames.add(filename)
with open("../.travis.yml", "r") as f:
deployment_contents = f.read()
regex = re.compile(r'bash \./infrastructure/prod-deploy/(.*)$', flags=re.MULTILINE)
results = re.findall(regex, deployment_contents)
for called_filename in results:
filenames.remove(called_filename)
# Check if any files are missed
error_text = ''
for filename in filenames:
error_text += f"\n'{filename}' is not called during prod deployment"
if error_text:
raise Exception(error_text)
|
<commit_before><commit_msg>Add test to check resource generation files are called<commit_after>
|
"""Tests deployments."""
import os
import re
from tests.BaseTest import BaseTest
class DeploymentTest(BaseTest):
"""Tests for deployments."""
def test_dev_deploy_scripts_are_called(self):
# Create list of script files
FILES_TO_SKIP = (
'decrypt-dev-secrets.sh',
'load-dev-deploy-config-envs.sh',
)
filenames = set()
path = "../infrastructure/dev-deploy/"
for filename in sorted(os.listdir(path)):
if filename.endswith('.sh') and filename not in FILES_TO_SKIP:
filenames.add(filename)
with open("../.travis.yml", "r") as f:
deployment_contents = f.read()
regex = re.compile(r'bash \./infrastructure/dev-deploy/(.*)$', flags=re.MULTILINE)
results = re.findall(regex, deployment_contents)
for called_filename in results:
filenames.remove(called_filename)
# Check if any files are missed
error_text = ''
for filename in filenames:
error_text += f"\n'{filename}' is not called during dev deployment"
if error_text:
raise Exception(error_text)
def test_prod_deploy_scripts_are_called(self):
# Create list of script files
FILES_TO_SKIP = (
'decrypt-prod-secrets.sh',
'load-prod-deploy-config-envs.sh',
)
filenames = set()
path = "../infrastructure/prod-deploy/"
for filename in sorted(os.listdir(path)):
if filename.endswith('.sh') and filename not in FILES_TO_SKIP:
filenames.add(filename)
with open("../.travis.yml", "r") as f:
deployment_contents = f.read()
regex = re.compile(r'bash \./infrastructure/prod-deploy/(.*)$', flags=re.MULTILINE)
results = re.findall(regex, deployment_contents)
for called_filename in results:
filenames.remove(called_filename)
# Check if any files are missed
error_text = ''
for filename in filenames:
error_text += f"\n'{filename}' is not called during prod deployment"
if error_text:
raise Exception(error_text)
|
Add test to check resource generation files are called"""Tests deployments."""
import os
import re
from tests.BaseTest import BaseTest
class DeploymentTest(BaseTest):
"""Tests for deployments."""
def test_dev_deploy_scripts_are_called(self):
# Create list of script files
FILES_TO_SKIP = (
'decrypt-dev-secrets.sh',
'load-dev-deploy-config-envs.sh',
)
filenames = set()
path = "../infrastructure/dev-deploy/"
for filename in sorted(os.listdir(path)):
if filename.endswith('.sh') and filename not in FILES_TO_SKIP:
filenames.add(filename)
with open("../.travis.yml", "r") as f:
deployment_contents = f.read()
regex = re.compile(r'bash \./infrastructure/dev-deploy/(.*)$', flags=re.MULTILINE)
results = re.findall(regex, deployment_contents)
for called_filename in results:
filenames.remove(called_filename)
# Check if any files are missed
error_text = ''
for filename in filenames:
error_text += f"\n'{filename}' is not called during dev deployment"
if error_text:
raise Exception(error_text)
def test_prod_deploy_scripts_are_called(self):
# Create list of script files
FILES_TO_SKIP = (
'decrypt-prod-secrets.sh',
'load-prod-deploy-config-envs.sh',
)
filenames = set()
path = "../infrastructure/prod-deploy/"
for filename in sorted(os.listdir(path)):
if filename.endswith('.sh') and filename not in FILES_TO_SKIP:
filenames.add(filename)
with open("../.travis.yml", "r") as f:
deployment_contents = f.read()
regex = re.compile(r'bash \./infrastructure/prod-deploy/(.*)$', flags=re.MULTILINE)
results = re.findall(regex, deployment_contents)
for called_filename in results:
filenames.remove(called_filename)
# Check if any files are missed
error_text = ''
for filename in filenames:
error_text += f"\n'{filename}' is not called during prod deployment"
if error_text:
raise Exception(error_text)
|
<commit_before><commit_msg>Add test to check resource generation files are called<commit_after>"""Tests deployments."""
import os
import re
from tests.BaseTest import BaseTest
class DeploymentTest(BaseTest):
"""Tests for deployments."""
def test_dev_deploy_scripts_are_called(self):
# Create list of script files
FILES_TO_SKIP = (
'decrypt-dev-secrets.sh',
'load-dev-deploy-config-envs.sh',
)
filenames = set()
path = "../infrastructure/dev-deploy/"
for filename in sorted(os.listdir(path)):
if filename.endswith('.sh') and filename not in FILES_TO_SKIP:
filenames.add(filename)
with open("../.travis.yml", "r") as f:
deployment_contents = f.read()
regex = re.compile(r'bash \./infrastructure/dev-deploy/(.*)$', flags=re.MULTILINE)
results = re.findall(regex, deployment_contents)
for called_filename in results:
filenames.remove(called_filename)
# Check if any files are missed
error_text = ''
for filename in filenames:
error_text += f"\n'{filename}' is not called during dev deployment"
if error_text:
raise Exception(error_text)
def test_prod_deploy_scripts_are_called(self):
# Create list of script files
FILES_TO_SKIP = (
'decrypt-prod-secrets.sh',
'load-prod-deploy-config-envs.sh',
)
filenames = set()
path = "../infrastructure/prod-deploy/"
for filename in sorted(os.listdir(path)):
if filename.endswith('.sh') and filename not in FILES_TO_SKIP:
filenames.add(filename)
with open("../.travis.yml", "r") as f:
deployment_contents = f.read()
regex = re.compile(r'bash \./infrastructure/prod-deploy/(.*)$', flags=re.MULTILINE)
results = re.findall(regex, deployment_contents)
for called_filename in results:
filenames.remove(called_filename)
# Check if any files are missed
error_text = ''
for filename in filenames:
error_text += f"\n'{filename}' is not called during prod deployment"
if error_text:
raise Exception(error_text)
|
|
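To illustrate what the MULTILINE regex in the test extracts, a hypothetical .travis.yml fragment (the script names below are invented for the example):
import re

travis_text = """
before_deploy:
  - bash ./infrastructure/dev-deploy/build-resources.sh
  - bash ./infrastructure/dev-deploy/deploy-static-files.sh
"""
regex = re.compile(r'bash \./infrastructure/dev-deploy/(.*)$', flags=re.MULTILINE)
print(re.findall(regex, travis_text))  # ['build-resources.sh', 'deploy-static-files.sh']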
5ae4da11c2a98321864569b900338f3e78cd22d6
|
flexx/app/tests/test_live.py
|
flexx/app/tests/test_live.py
|
""" Test a live app connection.
"""
from flexx import app, react
from flexx.util.testing import run_tests_if_main, raises
def runner(cls):
t = app.launch(cls, 'xul')
t.test_init()
app.run()
t.test_check()
class BaseTesterApp(app.Model):
@react.input
def input(v):
return v
@react.input
def output(v):
return v
@react.connect('output')
def _done(self, v):
self._result = v
self.session.close()
class JS:
@react.connect('input')
def _handle_input(self, v):
#print('handling, setting output', v)
self.output(v + 1)
class TesterApp1(BaseTesterApp):
def test_init(self):
self.input(3)
self._result = None
def test_check(self):
assert self._result == 4
class TesterApp2(BaseTesterApp):
def test_init(self):
self.input('foo')
self._result = None
def test_check(self):
assert self._result == 'foo1'
def test_apps():
runner(TesterApp1)
runner(TesterApp2)
run_tests_if_main()
|
Add test for a live connection with a browser to test important code paths.
|
Add test for a live connection with a browser to test important code paths.
|
Python
|
bsd-2-clause
|
JohnLunzer/flexx,jrversteegh/flexx,zoofIO/flexx,JohnLunzer/flexx,zoofIO/flexx,JohnLunzer/flexx,jrversteegh/flexx
|
Add test for a live connection with a browser to test important code paths.
|
""" Test a live app connection.
"""
from flexx import app, react
from flexx.util.testing import run_tests_if_main, raises
def runner(cls):
t = app.launch(cls, 'xul')
t.test_init()
app.run()
t.test_check()
class BaseTesterApp(app.Model):
@react.input
def input(v):
return v
@react.input
def output(v):
return v
@react.connect('output')
def _done(self, v):
self._result = v
self.session.close()
class JS:
@react.connect('input')
def _handle_input(self, v):
#print('handling, setting output', v)
self.output(v + 1)
class TesterApp1(BaseTesterApp):
def test_init(self):
self.input(3)
self._result = None
def test_check(self):
assert self._result == 4
class TesterApp2(BaseTesterApp):
def test_init(self):
self.input('foo')
self._result = None
def test_check(self):
assert self._result == 'foo1'
def test_apps():
runner(TesterApp1)
runner(TesterApp2)
run_tests_if_main()
|
<commit_before><commit_msg>Add test for a live connection with a browser to test important code paths.<commit_after>
|
""" Test a live app connection.
"""
from flexx import app, react
from flexx.util.testing import run_tests_if_main, raises
def runner(cls):
t = app.launch(cls, 'xul')
t.test_init()
app.run()
t.test_check()
class BaseTesterApp(app.Model):
@react.input
def input(v):
return v
@react.input
def output(v):
return v
@react.connect('output')
def _done(self, v):
self._result = v
self.session.close()
class JS:
@react.connect('input')
def _handle_input(self, v):
#print('handling, setting output', v)
self.output(v + 1)
class TesterApp1(BaseTesterApp):
def test_init(self):
self.input(3)
self._result = None
def test_check(self):
assert self._result == 4
class TesterApp2(BaseTesterApp):
def test_init(self):
self.input('foo')
self._result = None
def test_check(self):
assert self._result == 'foo1'
def test_apps():
runner(TesterApp1)
runner(TesterApp2)
run_tests_if_main()
|
Add test for a live connection with a browser to test important code paths.""" Test a live app connection.
"""
from flexx import app, react
from flexx.util.testing import run_tests_if_main, raises
def runner(cls):
t = app.launch(cls, 'xul')
t.test_init()
app.run()
t.test_check()
class BaseTesterApp(app.Model):
@react.input
def input(v):
return v
@react.input
def output(v):
return v
@react.connect('output')
def _done(self, v):
self._result = v
self.session.close()
class JS:
@react.connect('input')
def _handle_input(self, v):
#print('handling, setting output', v)
self.output(v + 1)
class TesterApp1(BaseTesterApp):
def test_init(self):
self.input(3)
self._result = None
def test_check(self):
assert self._result == 4
class TesterApp2(BaseTesterApp):
def test_init(self):
self.input('foo')
self._result = None
def test_check(self):
assert self._result == 'foo1'
def test_apps():
runner(TesterApp1)
runner(TesterApp2)
run_tests_if_main()
|
<commit_before><commit_msg>Add test for a live connection with a browser to test important code paths.<commit_after>""" Test a live app connection.
"""
from flexx import app, react
from flexx.util.testing import run_tests_if_main, raises
def runner(cls):
t = app.launch(cls, 'xul')
t.test_init()
app.run()
t.test_check()
class BaseTesterApp(app.Model):
@react.input
def input(v):
return v
@react.input
def output(v):
return v
@react.connect('output')
def _done(self, v):
self._result = v
self.session.close()
class JS:
@react.connect('input')
def _handle_input(self, v):
#print('handling, setting output', v)
self.output(v + 1)
class TesterApp1(BaseTesterApp):
def test_init(self):
self.input(3)
self._result = None
def test_check(self):
assert self._result == 4
class TesterApp2(BaseTesterApp):
def test_init(self):
self.input('foo')
self._result = None
def test_check(self):
assert self._result == 'foo1'
def test_apps():
runner(TesterApp1)
runner(TesterApp2)
run_tests_if_main()
|
|
7a838753853f172078068ba37e3462e3093f32c6
|
leetcode/289-Game-of-Life/GameofLife_001.py
|
leetcode/289-Game-of-Life/GameofLife_001.py
|
class Solution(object):
def gameOfLife(self, board):
"""
:type board: List[List[int]]
:rtype: void Do not return anything, modify board in-place instead.
"""
m, n = len(board), len(board[0])
for i in xrange(m):
for j in xrange(n):
cnt = 0
dx = [-1, 0, 1]
dy = [-1, 0, 1]
for di in dx:
for dj in dy:
if di == 0 and dj == 0:
continue
idi, jdj = i + di, j + dj
if 0 <= idi < m and 0 <= jdj < n and board[idi][jdj] & 1 > 0:
cnt += 1
if board[i][j]:
if 2 <= cnt <= 3:
board[i][j] = 3
elif cnt == 3:
board[i][j] = 2
for i in xrange(m):
for j in xrange(n):
board[i][j] = board[i][j] >> 1
|
Add Game of Life for lc
|
Add Game of Life for lc
|
Python
|
mit
|
cc13ny/Allin,cc13ny/algo,Chasego/cod,cc13ny/Allin,Chasego/cod,cc13ny/algo,Chasego/codirit,Chasego/codirit,cc13ny/Allin,Chasego/codirit,Chasego/codi,Chasego/codi,Chasego/codi,cc13ny/Allin,Chasego/codirit,Chasego/cod,cc13ny/algo,Chasego/cod,Chasego/codi,cc13ny/Allin,Chasego/codirit,Chasego/codi,cc13ny/algo,Chasego/cod,cc13ny/algo
|
Add Game of Life for lc
|
class Solution(object):
def gameOfLife(self, board):
"""
:type board: List[List[int]]
:rtype: void Do not return anything, modify board in-place instead.
"""
m, n = len(board), len(board[0])
for i in xrange(m):
for j in xrange(n):
cnt = 0
dx = [-1, 0, 1]
dy = [-1, 0, 1]
for di in dx:
for dj in dy:
if di == 0 and dj == 0:
continue
idi, jdj = i + di, j + dj
if 0 <= idi < m and 0 <= jdj < n and board[idi][jdj] & 1 > 0:
cnt += 1
if board[i][j]:
if 2 <= cnt <= 3:
board[i][j] = 3
elif cnt == 3:
board[i][j] = 2
for i in xrange(m):
for j in xrange(n):
board[i][j] = board[i][j] >> 1
|
<commit_before><commit_msg>Add Game of Life for lc<commit_after>
|
class Solution(object):
def gameOfLife(self, board):
"""
:type board: List[List[int]]
:rtype: void Do not return anything, modify board in-place instead.
"""
m, n = len(board), len(board[0])
for i in xrange(m):
for j in xrange(n):
cnt = 0
dx = [-1, 0, 1]
dy = [-1, 0, 1]
for di in dx:
for dj in dy:
if di == 0 and dj == 0:
continue
idi, jdj = i + di, j + dj
if 0 <= idi < m and 0 <= jdj < n and board[idi][jdj] & 1 > 0:
cnt += 1
if board[i][j]:
if 2 <= cnt <= 3:
board[i][j] = 3
elif cnt == 3:
board[i][j] = 2
for i in xrange(m):
for j in xrange(n):
board[i][j] = board[i][j] >> 1
|
Add Game of Life for lcclass Solution(object):
def gameOfLife(self, board):
"""
:type board: List[List[int]]
:rtype: void Do not return anything, modify board in-place instead.
"""
m, n = len(board), len(board[0])
for i in xrange(m):
for j in xrange(n):
cnt = 0
dx = [-1, 0, 1]
dy = [-1, 0, 1]
for di in dx:
for dj in dy:
if di == 0 and dj == 0:
continue
idi, jdj = i + di, j + dj
if 0 <= idi < m and 0 <= jdj < n and board[idi][jdj] & 1 > 0:
cnt += 1
if board[i][j]:
if 2 <= cnt <= 3:
board[i][j] = 3
elif cnt == 3:
board[i][j] = 2
for i in xrange(m):
for j in xrange(n):
board[i][j] = board[i][j] >> 1
|
<commit_before><commit_msg>Add Game of Life for lc<commit_after>class Solution(object):
def gameOfLife(self, board):
"""
:type board: List[List[int]]
:rtype: void Do not return anything, modify board in-place instead.
"""
m, n = len(board), len(board[0])
for i in xrange(m):
for j in xrange(n):
cnt = 0
dx = [-1, 0, 1]
dy = [-1, 0, 1]
for di in dx:
for dj in dy:
if di == 0 and dj == 0:
continue
idi, jdj = i + di, j + dj
if 0 <= idi < m and 0 <= jdj < n and board[idi][jdj] & 1 > 0:
cnt += 1
if board[i][j]:
if 2 <= cnt <= 3:
board[i][j] = 3
elif cnt == 3:
board[i][j] = 2
for i in xrange(m):
for j in xrange(n):
board[i][j] = board[i][j] >> 1
|
|
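A quick sanity check for the solution above (note it is Python 2 code, using xrange and print statements): the classic blinker oscillator, whose horizontal bar becomes a vertical bar after one step.
board = [[0, 0, 0],
         [1, 1, 1],
         [0, 0, 0]]
Solution().gameOfLife(board)
print board  # [[0, 1, 0], [0, 1, 0], [0, 1, 0]]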
a14ce43c2d318f864f70392eab16145ec6bc4733
|
support/jenkins/buildAllModuleCombination.py
|
support/jenkins/buildAllModuleCombination.py
|
import os
from subprocess import call
from itertools import product, repeat
# To be called from the OpenSpace main folder
modules = os.listdir("modules")
modules.remove("base")
# Get 2**len(modules) combinatorial combinations of ON/OFF
settings = []
for args in product(*repeat(("ON", "OFF"), len(modules))):
settings.append(args)
# Create all commands
cmds = []
for s in settings:
cmd = ["cmake", "-DGHOUL_USE_DEVIL=NO", "-DOPENSPACE_MODULE_BASE=ON"]
for m,s in zip(modules, s):
cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=" + s)
cmds.append(cmd)
# Build cmake and compile
for c in cmds:
    call(c)
call(["make", "clean"])
call(["make", "-j4"])
|
Add Python script to build all combinations of modules
|
Add Python script to build all combinations of modules
|
Python
|
mit
|
OpenSpace/OpenSpace,OpenSpace/OpenSpace,OpenSpace/OpenSpace,OpenSpace/OpenSpace
|
Add Python script to build all combinations of modules
|
import os
from subprocess import call
from itertools import product, repeat
# To be called from the OpenSpace main folder
modules = os.listdir("modules")
modules.remove("base")
# Get 2**len(modules) combinatorial combinations of ON/OFF
settings = []
for args in product(*repeat(("ON", "OFF"), len(modules))):
settings.append(args)
# Create all commands
cmds = []
for s in settings:
cmd = ["cmake", "-DGHOUL_USE_DEVIL=NO", "-DOPENSPACE_MODULE_BASE=ON"]
for m,s in zip(modules, s):
cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=" + s)
cmds.append(cmd)
# Build cmake and compile
for c in cmds:
    call(c)
call(["make", "clean"])
call(["make", "-j4"])
|
<commit_before><commit_msg>Add Python script to build all combinations of modules<commit_after>
|
import os
from subprocess import call
from itertools import product, repeat
# To be called from the OpenSpace main folder
modules = os.listdir("modules")
modules.remove("base")
# Get 2**len(modules) combinatorial combinations of ON/OFF
settings = []
for args in product(*repeat(("ON", "OFF"), len(modules))):
settings.append(args)
# Create all commands
cmds = []
for s in settings:
cmd = ["cmake", "-DGHOUL_USE_DEVIL=NO", "-DOPENSPACE_MODULE_BASE=ON"]
for m,s in zip(modules, s):
cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=" + s)
cmds.append(cmd)
# Build cmake and compile
for c in cmds:
    call(c)
call(["make", "clean"])
call(["make", "-j4"])
|
Add Python script to build all combinations of modulesimport os
from subprocess import call
from itertools import product, repeat
# To be called from the OpenSpace main folder
modules = os.listdir("modules")
modules.remove("base")
# Get 2**len(modules) combinatorial combinations of ON/OFF
settings = []
for args in product(*repeat(("ON", "OFF"), len(modules))):
settings.append(args)
# Create all commands
cmds = []
for s in settings:
cmd = ["cmake", "-DGHOUL_USE_DEVIL=NO", "-DOPENSPACE_MODULE_BASE=ON"]
for m,s in zip(modules, s):
cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=" + s)
cmds.append(cmd)
# Build cmake and compile
for c in cmds:
    call(c)
call(["make", "clean"])
call(["make", "-j4"])
|
<commit_before><commit_msg>Add Python script to build all combinations of modules<commit_after>import os
from subprocess import call
from itertools import product, repeat
# To be called from the OpenSpace main folder
modules = os.listdir("modules")
modules.remove("base")
# Get 2**len(modules) combinatorial combinations of ON/OFF
settings = []
for args in product(*repeat(("ON", "OFF"), len(modules))):
settings.append(args)
# Create all commands
cmds = []
for s in settings:
cmd = ["cmake", "-DGHOUL_USE_DEVIL=NO", "-DOPENSPACE_MODULE_BASE=ON"]
for m,s in zip(modules, s):
cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=" + s)
cmds.append(cmd)
# Build cmake and compile
for c in cmds:
    call(c)
call(["make", "clean"])
call(["make", "-j4"])
|
|
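A small standalone check of the ON/OFF combination generator used in the script above, with two modules instead of the real module list:
from itertools import product, repeat

combos = list(product(*repeat(("ON", "OFF"), 2)))
print(combos)                  # [('ON', 'ON'), ('ON', 'OFF'), ('OFF', 'ON'), ('OFF', 'OFF')]
assert len(combos) == 2 ** 2   # N optional modules -> 2**N build configurations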
e42f488ae779168c10d6a01bd04dfd2f6c3c311d
|
restalchemy/api/controllers.py
|
restalchemy/api/controllers.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2014 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from restalchemy.common import exceptions as exc
class Controller(object):
__resource__ = None
@classmethod
def get_resource(cls):
return cls.__resource__
@property
def model(self):
return self.get_resource()
def create(self, **kwargs):
raise exc.NotImplementedError()
def get(self, uuid):
raise exc.NotImplementedError()
def filter(self, **kwargs):
raise exc.NotImplementedError()
def delete(self, uuid):
raise exc.NotImplementedError()
def update(self, uuid, **kwargs):
raise exc.NotImplementedError()
|
Add base class for API Controller
|
Add base class for API Controller
Change-Id: Idf28f395308be3416a95783d9c1ec1d6c2478487
|
Python
|
apache-2.0
|
phantomii/restalchemy
|
Add base class for API Controller
Change-Id: Idf28f395308be3416a95783d9c1ec1d6c2478487
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2014 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from restalchemy.common import exceptions as exc
class Controller(object):
__resource__ = None
@classmethod
def get_resource(cls):
return cls.__resource__
@property
def model(self):
return self.get_resource()
def create(self, **kwargs):
raise exc.NotImplementedError()
def get(self, uuid):
raise exc.NotImplementedError()
def filter(self, **kwargs):
raise exc.NotImplementedError()
def delete(self, uuid):
raise exc.NotImplementedError()
def update(self, uuid, **kwargs):
raise exc.NotImplementedError()
|
<commit_before><commit_msg>Add base class for API Controller
Change-Id: Idf28f395308be3416a95783d9c1ec1d6c2478487<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2014 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from restalchemy.common import exceptions as exc
class Controller(object):
__resource__ = None
@classmethod
def get_resource(cls):
return cls.__resource__
@property
def model(self):
return self.get_resource()
def create(self, **kwargs):
raise exc.NotImplementedError()
def get(self, uuid):
raise exc.NotImplementedError()
def filter(self, **kwargs):
raise exc.NotImplementedError()
def delete(self, uuid):
raise exc.NotImplementedError()
def update(self, uuid, **kwargs):
raise exc.NotImplementedError()
|
Add base class for API Controller
Change-Id: Idf28f395308be3416a95783d9c1ec1d6c2478487# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2014 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from restalchemy.common import exceptions as exc
class Controller(object):
__resource__ = None
@classmethod
def get_resource(cls):
return cls.__resource__
@property
def model(self):
return self.get_resource()
def create(self, **kwargs):
raise exc.NotImplementedError()
def get(self, uuid):
raise exc.NotImplementedError()
def filter(self, **kwargs):
raise exc.NotImplementedError()
def delete(self, uuid):
raise exc.NotImplementedError()
def update(self, uuid, **kwargs):
raise exc.NotImplementedError()
|
<commit_before><commit_msg>Add base class for API Controller
Change-Id: Idf28f395308be3416a95783d9c1ec1d6c2478487<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2014 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from restalchemy.common import exceptions as exc
class Controller(object):
__resource__ = None
@classmethod
def get_resource(cls):
return cls.__resource__
@property
def model(self):
return self.get_resource()
def create(self, **kwargs):
raise exc.NotImplementedError()
def get(self, uuid):
raise exc.NotImplementedError()
def filter(self, **kwargs):
raise exc.NotImplementedError()
def delete(self, uuid):
raise exc.NotImplementedError()
def update(self, uuid, **kwargs):
raise exc.NotImplementedError()
|
|
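A hypothetical subclass sketch showing how the base Controller above is meant to be extended; the resource placeholder and the in-memory lookup are invented for illustration and are not part of restalchemy:
from restalchemy.api.controllers import Controller

class DummyController(Controller):
    __resource__ = dict  # placeholder; a real controller would reference a resource class

    def get(self, uuid):
        # a real implementation would load the resource from storage here
        return {'uuid': uuid}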
8f13e16b904d71149321b4ae8f03db32a49cdf08
|
iatidq/user_activity_types.py
|
iatidq/user_activity_types.py
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
LOGGED_IN = 1
LOGGED_OUT = 2
|
Add user activity types module
|
Add user activity types module
|
Python
|
agpl-3.0
|
pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality,pwyf/IATI-Data-Quality
|
Add user activity types module
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
LOGGED_IN = 1
LOGGED_OUT = 2
|
<commit_before><commit_msg>Add user activity types module<commit_after>
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
LOGGED_IN = 1
LOGGED_OUT = 2
|
Add user activity types module
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
LOGGED_IN = 1
LOGGED_OUT = 2
|
<commit_before><commit_msg>Add user activity types module<commit_after>
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
LOGGED_IN = 1
LOGGED_OUT = 2
|
|
48ec53343be7d7efc07bc1f66416d884be22b430
|
Python/ds.py
|
Python/ds.py
|
"""
This file includes several data structures used in LeetCode questions.
"""
# Definition for a list node.
class ListNode(object):
def __init__(self, n):
self.val = n
self.next = None
def createLinkedList(nodelist):
#type nodelist: list[int/float]
#rtype: head of linked list
linkedList = ListNode(0)
head = linkedList
for val in nodelist:
linkedList.next = ListNode(val)
linkedList = linkedList.next
return head.next
def printList(head):
if not head:
print "head is None!\n"
return
else:
while head:
print head.val
head = head.next
return
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
#TODO finish createBinaryTree
def createBinaryTree(nodelist):
root = TreeNode(0)
l = len(nodelist)
if l == 0:
return None
n = [1,2,3]
bst = createBinaryTree(n)
|
Add class file for linked list and binary tree
|
Add class file for linked list and binary tree
|
Python
|
mit
|
comicxmz001/LeetCode,comicxmz001/LeetCode
|
Add class file for linked list and binary tree
|
"""
This file includes several data structures used in LeetCode questions.
"""
# Definition for a list node.
class ListNode(object):
def __init__(self, n):
self.val = n
self.next = None
def createLinkedList(nodelist):
#type nodelist: list[int/float]
#rtype: head of linked list
linkedList = ListNode(0)
head = linkedList
for val in nodelist:
linkedList.next = ListNode(val)
linkedList = linkedList.next
return head.next
def printList(head):
if not head:
print "head is None!\n"
return
else:
while head:
print head.val
head = head.next
return
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
#TODO finish createBinaryTree
def createBinaryTree(nodelist):
root = TreeNode(0)
l = len(nodelist)
if l == 0:
return None
n = [1,2,3]
bst = createBinaryTree(n)
|
<commit_before><commit_msg>Add class file for linked list and binary tree<commit_after>
|
"""
This file includes several data structures used in LeetCode questions.
"""
# Definition for a list node.
class ListNode(object):
def __init__(self, n):
self.val = n
self.next = None
def createLinkedList(nodelist):
#type nodelist: list[int/float]
#rtype: head of linked list
linkedList = ListNode(0)
head = linkedList
for val in nodelist:
linkedList.next = ListNode(val)
linkedList = linkedList.next
return head.next
def printList(head):
if not head:
print "head is None!\n"
return
else:
while head:
print head.val
head = head.next
return
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
#TODO finish createBinaryTree
def createBinaryTree(nodelist):
root = TreeNode(0)
l = len(nodelist)
if l == 0:
return None
n = [1,2,3]
bst = createBinaryTree(n)
|
Add class file for linked list and binary tree"""
This file includes several data structures used in LeetCode questions.
"""
# Definition for a list node.
class ListNode(object):
def __init__(self, n):
self.val = n
self.next = None
def createLinkedList(nodelist):
#type nodelist: list[int/float]
#rtype: head of linked list
linkedList = ListNode(0)
head = linkedList
for val in nodelist:
linkedList.next = ListNode(val)
linkedList = linkedList.next
return head.next
def printList(head):
if not head:
print "head is None!\n"
return
else:
while head:
print head.val
head = head.next
return
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
#TODO finish createBinaryTree
def createBinaryTree(nodelist):
root = TreeNode(0)
l = len(nodelist)
if l == 0:
return None
n = [1,2,3]
bst = createBinaryTree(n)
|
<commit_before><commit_msg>Add class file for linked list and binary tree<commit_after>"""
This file includes several data structures used in LeetCode questions.
"""
# Definition for a list node.
class ListNode(object):
def __init__(self, n):
self.val = n
self.next = None
def createLinkedList(nodelist):
#type nodelist: list[int/float]
#rtype: head of linked list
linkedList = ListNode(0)
head = linkedList
for val in nodelist:
linkedList.next = ListNode(val)
linkedList = linkedList.next
return head.next
def printList(head):
if not head:
print "head is None!\n"
return
else:
while head:
print head.val
head = head.next
return
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
#TODO finish createBinaryTree
def createBinaryTree(nodelist):
root = TreeNode(0)
l = len(nodelist)
if l == 0:
return None
n = [1,2,3]
bst = createBinaryTree(n)
|
|
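The createBinaryTree helper in the record above is left as a TODO and never builds a tree from its input. One possible way to finish it, shown only as a sketch and assuming the usual LeetCode convention of a level-order list with None marking an absent child, is:
from collections import deque

def createBinaryTree(nodelist):
    # Sketch only: assumes nodelist is a level-order list (LeetCode style)
    # where None marks a missing child; reuses the TreeNode class defined above.
    if not nodelist or nodelist[0] is None:
        return None
    root = TreeNode(nodelist[0])
    queue = deque([root])
    i = 1
    while queue and i < len(nodelist):
        node = queue.popleft()
        if nodelist[i] is not None:  # left child
            node.left = TreeNode(nodelist[i])
            queue.append(node.left)
        i += 1
        if i < len(nodelist) and nodelist[i] is not None:  # right child
            node.right = TreeNode(nodelist[i])
            queue.append(node.right)
        i += 1
    return root
With that in place, createBinaryTree([1, 2, 3]) returns a root holding 1 whose left and right children hold 2 and 3.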
4601244bea1c0b84679d90b0b368da7f39908759
|
dakota_utils/tests/test_file.py
|
dakota_utils/tests/test_file.py
|
#! /usr/bin/env python
#
# Tests for dakota_utils.file.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.file import *
def setup_module():
print('File tests:')
os.environ['_test_tmp_dir'] = tempfile.mkdtemp()
def teardown_module():
shutil.rmtree(os.environ['_test_tmp_dir'])
@raises(TypeError)
def test_remove_zero_arguments():
'''
Tests for no input parameter to remove().
'''
remove()
@raises(TypeError)
def test_remove_file_zero_arguments():
'''
Tests for no input parameter to remove_file().
'''
remove_file()
@raises(TypeError)
def test_remove_directory_zero_arguments():
'''
Tests for no input parameter to remove_directory().
'''
remove_directory()
@raises(TypeError)
def test_touch_zero_arguments():
'''
Tests for no input parameter to touch().
'''
touch()
|
Add unit tests for file module
|
Add unit tests for file module
|
Python
|
mit
|
mcflugen/dakota-experiments,mcflugen/dakota-experiments,mdpiper/dakota-experiments,mdpiper/dakota-experiments,mdpiper/dakota-experiments
|
Add unit tests for file module
|
#! /usr/bin/env python
#
# Tests for dakota_utils.file.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.file import *
def setup_module():
print('File tests:')
os.environ['_test_tmp_dir'] = tempfile.mkdtemp()
def teardown_module():
shutil.rmtree(os.environ['_test_tmp_dir'])
@raises(TypeError)
def test_remove_zero_arguments():
'''
Tests for no input parameter to remove().
'''
remove()
@raises(TypeError)
def test_remove_file_zero_arguments():
'''
Tests for no input parameter to remove_file().
'''
remove_file()
@raises(TypeError)
def test_remove_directory_zero_arguments():
'''
Tests for no input parameter to remove_directory().
'''
remove_directory()
@raises(TypeError)
def test_touch_zero_arguments():
'''
Tests for no input parameter to touch().
'''
touch()
|
<commit_before><commit_msg>Add unit tests for file module<commit_after>
|
#! /usr/bin/env python
#
# Tests for dakota_utils.file.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.file import *
def setup_module():
print('File tests:')
os.environ['_test_tmp_dir'] = tempfile.mkdtemp()
def teardown_module():
shutil.rmtree(os.environ['_test_tmp_dir'])
@raises(TypeError)
def test_remove_zero_arguments():
'''
Tests for no input parameter to remove().
'''
remove()
@raises(TypeError)
def test_remove_file_zero_arguments():
'''
Tests for no input parameter to remove_file().
'''
remove_file()
@raises(TypeError)
def test_remove_directory_zero_arguments():
'''
Tests for no input parameter to remove_directory().
'''
remove_directory()
@raises(TypeError)
def test_touch_zero_arguments():
'''
Tests for no input parameter to touch().
'''
touch()
|
Add unit tests for file module#! /usr/bin/env python
#
# Tests for dakota_utils.file.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.file import *
def setup_module():
print('File tests:')
os.environ['_test_tmp_dir'] = tempfile.mkdtemp()
def teardown_module():
shutil.rmtree(os.environ['_test_tmp_dir'])
@raises(TypeError)
def test_remove_zero_arguments():
'''
Tests for no input parameter to remove().
'''
remove()
@raises(TypeError)
def test_remove_file_zero_arguments():
'''
Tests for no input parameter to remove_file().
'''
remove_file()
@raises(TypeError)
def test_remove_directory_zero_arguments():
'''
Tests for no input parameter to remove_directory().
'''
remove_directory()
@raises(TypeError)
def test_touch_zero_arguments():
'''
Tests for no input parameter to touch().
'''
touch()
|
<commit_before><commit_msg>Add unit tests for file module<commit_after>#! /usr/bin/env python
#
# Tests for dakota_utils.file.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.file import *
def setup_module():
print('File tests:')
os.environ['_test_tmp_dir'] = tempfile.mkdtemp()
def teardown_module():
shutil.rmtree(os.environ['_test_tmp_dir'])
@raises(TypeError)
def test_remove_zero_arguments():
'''
Tests for no input parameter to remove().
'''
remove()
@raises(TypeError)
def test_remove_file_zero_arguments():
'''
Tests for no input parameter to remove_file().
'''
remove_file()
@raises(TypeError)
def test_remove_directory_zero_arguments():
'''
Tests for no input parameter to remove_directory().
'''
remove_directory()
@raises(TypeError)
def test_touch_zero_arguments():
'''
Tests for no input parameter to touch().
'''
touch()
|
|
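The four tests in the record above all follow the same pattern: call a dakota_utils.file function with no arguments and let nose's raises decorator assert the TypeError. For comparison only, the same check written against pytest (not part of the original test module) would look like:
import pytest
from dakota_utils.file import remove

def test_remove_zero_arguments():
    # remove() requires at least one argument, so a bare call must raise TypeError.
    with pytest.raises(TypeError):
        remove()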
cde2263c2084b8ce91e85face95e5e85439ab7ce
|
scripts/annotate_rsvps.py
|
scripts/annotate_rsvps.py
|
"""Utilities for annotating workshop RSVP data.
Example ::
import pandas as pd
from scripts import annotate_rsvps
frame = pd.read_csv('workshop.csv')
annotated = annotate_rsvps.process(frame)
annotated.to_csv('workshop-annotated.csv')
"""
import re
import logging
from dateutil.parser import parse as parse_date
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from website.models import User, Node, NodeLog
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def find_by_email(email):
try:
return User.find_one(Q('username', 'iexact', email))
except ModularOdmException:
return None
def find_by_name(name):
try:
parts = re.split(r'\s+', name.strip())
except:
return None
if len(parts) < 2:
return None
users = User.find(
reduce(
lambda acc, value: acc & value,
[
Q('fullname', 'icontains', part.decode('utf-8', 'ignore'))
for part in parts
]
)
).sort('-date_created')
if not users:
return None
if len(users) > 1:
logger.warn('Multiple users found for name {}'.format(name))
return users[0]
def logs_since(user, date):
return NodeLog.find(
Q('user', 'eq', user._id) &
Q('date', 'gt', date)
)
def nodes_since(user, date):
return Node.find(
Q('creator', 'eq', user._id) &
Q('date_created', 'gt', date)
)
def process(frame):
frame = frame.copy()
frame['user_id'] = ''
frame['user_logs'] = ''
frame['user_nodes'] = ''
frame['last_log'] = ''
for idx, row in frame.iterrows():
user = (
find_by_email(row['Email address'].strip()) or
find_by_name(row['Name'])
)
if user:
date = parse_date(row['Workshop_date'])
frame.loc[idx, 'user_id'] = user._id
logs = logs_since(user, date)
frame.loc[idx, 'user_logs'] = logs.count()
frame.loc[idx, 'user_nodes'] = nodes_since(user, date).count()
if logs:
frame.loc[idx, 'last_log'] = logs.sort('-date')[0].date.strftime('%c')
return frame
|
Add script to annotate conference RSVP spreadsheets.
|
Add script to annotate conference RSVP spreadsheets.
Requested by @lbanner.
|
Python
|
apache-2.0
|
caneruguz/osf.io,CenterForOpenScience/osf.io,petermalcolm/osf.io,bdyetton/prettychart,asanfilippo7/osf.io,petermalcolm/osf.io,KAsante95/osf.io,ZobairAlijan/osf.io,cwisecarver/osf.io,Ghalko/osf.io,Johnetordoff/osf.io,cosenal/osf.io,DanielSBrown/osf.io,doublebits/osf.io,jnayak1/osf.io,MerlinZhang/osf.io,rdhyee/osf.io,danielneis/osf.io,danielneis/osf.io,cslzchen/osf.io,reinaH/osf.io,kushG/osf.io,HarryRybacki/osf.io,billyhunt/osf.io,acshi/osf.io,kch8qx/osf.io,jnayak1/osf.io,lamdnhan/osf.io,HarryRybacki/osf.io,caseyrollins/osf.io,revanthkolli/osf.io,hmoco/osf.io,hmoco/osf.io,icereval/osf.io,doublebits/osf.io,adlius/osf.io,erinspace/osf.io,fabianvf/osf.io,lamdnhan/osf.io,caseyrygt/osf.io,hmoco/osf.io,amyshi188/osf.io,arpitar/osf.io,cslzchen/osf.io,wearpants/osf.io,asanfilippo7/osf.io,mattclark/osf.io,zachjanicki/osf.io,monikagrabowska/osf.io,dplorimer/osf,Ghalko/osf.io,dplorimer/osf,brianjgeiger/osf.io,erinspace/osf.io,CenterForOpenScience/osf.io,kwierman/osf.io,reinaH/osf.io,DanielSBrown/osf.io,reinaH/osf.io,caneruguz/osf.io,aaxelb/osf.io,petermalcolm/osf.io,laurenrevere/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,barbour-em/osf.io,caseyrygt/osf.io,monikagrabowska/osf.io,DanielSBrown/osf.io,ticklemepierce/osf.io,leb2dg/osf.io,jolene-esposito/osf.io,barbour-em/osf.io,chennan47/osf.io,GageGaskins/osf.io,jolene-esposito/osf.io,kch8qx/osf.io,binoculars/osf.io,SSJohns/osf.io,icereval/osf.io,jnayak1/osf.io,monikagrabowska/osf.io,zachjanicki/osf.io,chennan47/osf.io,TomHeatwole/osf.io,jolene-esposito/osf.io,emetsger/osf.io,cosenal/osf.io,fabianvf/osf.io,TomBaxter/osf.io,barbour-em/osf.io,cosenal/osf.io,monikagrabowska/osf.io,mluo613/osf.io,mattclark/osf.io,alexschiller/osf.io,acshi/osf.io,brandonPurvis/osf.io,Johnetordoff/osf.io,hmoco/osf.io,kwierman/osf.io,brandonPurvis/osf.io,sbt9uc/osf.io,felliott/osf.io,fabianvf/osf.io,icereval/osf.io,TomHeatwole/osf.io,ckc6cz/osf.io,HarryRybacki/osf.io,RomanZWang/osf.io,mfraezz/osf.io,adlius/osf.io,himanshuo/osf.io,lyndsysimon/osf.io,KAsante95/osf.io,felliott/osf.io,ticklemepierce/osf.io,crcresearch/osf.io,lamdnhan/osf.io,caneruguz/osf.io,chennan47/osf.io,MerlinZhang/osf.io,cslzchen/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,jmcarp/osf.io,dplorimer/osf,jeffreyliu3230/osf.io,binoculars/osf.io,himanshuo/osf.io,cwisecarver/osf.io,abought/osf.io,aaxelb/osf.io,wearpants/osf.io,himanshuo/osf.io,zkraime/osf.io,wearpants/osf.io,HarryRybacki/osf.io,zamattiac/osf.io,doublebits/osf.io,kushG/osf.io,zamattiac/osf.io,amyshi188/osf.io,acshi/osf.io,cwisecarver/osf.io,jeffreyliu3230/osf.io,adlius/osf.io,caseyrygt/osf.io,mfraezz/osf.io,danielneis/osf.io,cldershem/osf.io,bdyetton/prettychart,TomHeatwole/osf.io,mfraezz/osf.io,doublebits/osf.io,revanthkolli/osf.io,billyhunt/osf.io,HalcyonChimera/osf.io,adlius/osf.io,MerlinZhang/osf.io,billyhunt/osf.io,ticklemepierce/osf.io,petermalcolm/osf.io,lyndsysimon/osf.io,mluke93/osf.io,GaryKriebel/osf.io,jmcarp/osf.io,doublebits/osf.io,chrisseto/osf.io,njantrania/osf.io,kwierman/osf.io,TomBaxter/osf.io,zkraime/osf.io,abought/osf.io,KAsante95/osf.io,asanfilippo7/osf.io,saradbowman/osf.io,felliott/osf.io,GaryKriebel/osf.io,ckc6cz/osf.io,kushG/osf.io,jinluyuan/osf.io,reinaH/osf.io,ticklemepierce/osf.io,samchrisinger/osf.io,laurenrevere/osf.io,GageGaskins/osf.io,zamattiac/osf.io,RomanZWang/osf.io,felliott/osf.io,mluo613/osf.io,leb2dg/osf.io,cldershem/osf.io,Nesiehr/osf.io,GageGaskins/osf.io,pattisdr/osf.io,ZobairAlijan/osf.io,mluke93/osf.io,samchrisinger/osf.io,zkraime/osf.io,samanehsan/osf.io,amyshi188/osf.io,leb2dg/osf.io,acsh
i/osf.io,arpitar/osf.io,SSJohns/osf.io,pattisdr/osf.io,SSJohns/osf.io,emetsger/osf.io,lyndsysimon/osf.io,kch8qx/osf.io,sloria/osf.io,Nesiehr/osf.io,Ghalko/osf.io,cslzchen/osf.io,cosenal/osf.io,mattclark/osf.io,Nesiehr/osf.io,danielneis/osf.io,baylee-d/osf.io,saradbowman/osf.io,njantrania/osf.io,kwierman/osf.io,GageGaskins/osf.io,jinluyuan/osf.io,binoculars/osf.io,mluo613/osf.io,kch8qx/osf.io,caseyrollins/osf.io,caneruguz/osf.io,ckc6cz/osf.io,njantrania/osf.io,mluke93/osf.io,Ghalko/osf.io,samanehsan/osf.io,samchrisinger/osf.io,alexschiller/osf.io,fabianvf/osf.io,jmcarp/osf.io,brianjgeiger/osf.io,crcresearch/osf.io,jeffreyliu3230/osf.io,HalcyonChimera/osf.io,emetsger/osf.io,emetsger/osf.io,leb2dg/osf.io,KAsante95/osf.io,haoyuchen1992/osf.io,KAsante95/osf.io,wearpants/osf.io,njantrania/osf.io,ZobairAlijan/osf.io,samanehsan/osf.io,Nesiehr/osf.io,ckc6cz/osf.io,revanthkolli/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,bdyetton/prettychart,acshi/osf.io,rdhyee/osf.io,barbour-em/osf.io,kch8qx/osf.io,jinluyuan/osf.io,cwisecarver/osf.io,DanielSBrown/osf.io,zachjanicki/osf.io,Johnetordoff/osf.io,kushG/osf.io,samchrisinger/osf.io,RomanZWang/osf.io,dplorimer/osf,mfraezz/osf.io,monikagrabowska/osf.io,sbt9uc/osf.io,ZobairAlijan/osf.io,zachjanicki/osf.io,revanthkolli/osf.io,GaryKriebel/osf.io,cldershem/osf.io,brandonPurvis/osf.io,sbt9uc/osf.io,GaryKriebel/osf.io,cldershem/osf.io,bdyetton/prettychart,laurenrevere/osf.io,jnayak1/osf.io,arpitar/osf.io,Johnetordoff/osf.io,SSJohns/osf.io,abought/osf.io,GageGaskins/osf.io,brandonPurvis/osf.io,rdhyee/osf.io,haoyuchen1992/osf.io,rdhyee/osf.io,jeffreyliu3230/osf.io,mluo613/osf.io,RomanZWang/osf.io,jmcarp/osf.io,caseyrollins/osf.io,erinspace/osf.io,caseyrygt/osf.io,billyhunt/osf.io,crcresearch/osf.io,aaxelb/osf.io,RomanZWang/osf.io,TomHeatwole/osf.io,alexschiller/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,himanshuo/osf.io,abought/osf.io,aaxelb/osf.io,haoyuchen1992/osf.io,lamdnhan/osf.io,mluke93/osf.io,chrisseto/osf.io,zkraime/osf.io,HalcyonChimera/osf.io,jolene-esposito/osf.io,sbt9uc/osf.io,chrisseto/osf.io,TomBaxter/osf.io,alexschiller/osf.io,amyshi188/osf.io,zamattiac/osf.io,chrisseto/osf.io,lyndsysimon/osf.io,haoyuchen1992/osf.io,samanehsan/osf.io,alexschiller/osf.io,billyhunt/osf.io,asanfilippo7/osf.io,MerlinZhang/osf.io,mluo613/osf.io,jinluyuan/osf.io,baylee-d/osf.io,brandonPurvis/osf.io,arpitar/osf.io
|
Add script to annotate conference RSVP spreadsheets.
Requested by @lbanner.
|
"""Utilities for annotating workshop RSVP data.
Example ::
import pandas as pd
from scripts import annotate_rsvps
frame = pd.read_csv('workshop.csv')
annotated = annotate_rsvps.process(frame)
annotated.to_csv('workshop-annotated.csv')
"""
import re
import logging
from dateutil.parser import parse as parse_date
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from website.models import User, Node, NodeLog
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def find_by_email(email):
try:
return User.find_one(Q('username', 'iexact', email))
except ModularOdmException:
return None
def find_by_name(name):
try:
parts = re.split(r'\s+', name.strip())
except:
return None
if len(parts) < 2:
return None
users = User.find(
reduce(
lambda acc, value: acc & value,
[
Q('fullname', 'icontains', part.decode('utf-8', 'ignore'))
for part in parts
]
)
).sort('-date_created')
if not users:
return None
if len(users) > 1:
logger.warn('Multiple users found for name {}'.format(name))
return users[0]
def logs_since(user, date):
return NodeLog.find(
Q('user', 'eq', user._id) &
Q('date', 'gt', date)
)
def nodes_since(user, date):
return Node.find(
Q('creator', 'eq', user._id) &
Q('date_created', 'gt', date)
)
def process(frame):
frame = frame.copy()
frame['user_id'] = ''
frame['user_logs'] = ''
frame['user_nodes'] = ''
frame['last_log'] = ''
for idx, row in frame.iterrows():
user = (
find_by_email(row['Email address'].strip()) or
find_by_name(row['Name'])
)
if user:
date = parse_date(row['Workshop_date'])
frame.loc[idx, 'user_id'] = user._id
logs = logs_since(user, date)
frame.loc[idx, 'user_logs'] = logs.count()
frame.loc[idx, 'user_nodes'] = nodes_since(user, date).count()
if logs:
frame.loc[idx, 'last_log'] = logs.sort('-date')[0].date.strftime('%c')
return frame
|
<commit_before><commit_msg>Add script to annotate conference RSVP spreadsheets.
Requested by @lbanner.<commit_after>
|
"""Utilities for annotating workshop RSVP data.
Example ::
import pandas as pd
from scripts import annotate_rsvps
frame = pd.read_csv('workshop.csv')
annotated = annotate_rsvps.process(frame)
annotated.to_csv('workshop-annotated.csv')
"""
import re
import logging
from dateutil.parser import parse as parse_date
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from website.models import User, Node, NodeLog
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def find_by_email(email):
try:
return User.find_one(Q('username', 'iexact', email))
except ModularOdmException:
return None
def find_by_name(name):
try:
parts = re.split(r'\s+', name.strip())
except:
return None
if len(parts) < 2:
return None
users = User.find(
reduce(
lambda acc, value: acc & value,
[
Q('fullname', 'icontains', part.decode('utf-8', 'ignore'))
for part in parts
]
)
).sort('-date_created')
if not users:
return None
if len(users) > 1:
logger.warn('Multiple users found for name {}'.format(name))
return users[0]
def logs_since(user, date):
return NodeLog.find(
Q('user', 'eq', user._id) &
Q('date', 'gt', date)
)
def nodes_since(user, date):
return Node.find(
Q('creator', 'eq', user._id) &
Q('date_created', 'gt', date)
)
def process(frame):
frame = frame.copy()
frame['user_id'] = ''
frame['user_logs'] = ''
frame['user_nodes'] = ''
frame['last_log'] = ''
for idx, row in frame.iterrows():
user = (
find_by_email(row['Email address'].strip()) or
find_by_name(row['Name'])
)
if user:
date = parse_date(row['Workshop_date'])
frame.loc[idx, 'user_id'] = user._id
logs = logs_since(user, date)
frame.loc[idx, 'user_logs'] = logs.count()
frame.loc[idx, 'user_nodes'] = nodes_since(user, date).count()
if logs:
frame.loc[idx, 'last_log'] = logs.sort('-date')[0].date.strftime('%c')
return frame
|
Add script to annotate conference RSVP spreadsheets.
Requested by @lbanner."""Utilities for annotating workshop RSVP data.
Example ::
import pandas as pd
from scripts import annotate_rsvps
frame = pd.read_csv('workshop.csv')
annotated = annotate_rsvps.process(frame)
annotated.to_csv('workshop-annotated.csv')
"""
import re
import logging
from dateutil.parser import parse as parse_date
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from website.models import User, Node, NodeLog
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def find_by_email(email):
try:
return User.find_one(Q('username', 'iexact', email))
except ModularOdmException:
return None
def find_by_name(name):
try:
parts = re.split(r'\s+', name.strip())
except:
return None
if len(parts) < 2:
return None
users = User.find(
reduce(
lambda acc, value: acc & value,
[
Q('fullname', 'icontains', part.decode('utf-8', 'ignore'))
for part in parts
]
)
).sort('-date_created')
if not users:
return None
if len(users) > 1:
logger.warn('Multiple users found for name {}'.format(name))
return users[0]
def logs_since(user, date):
return NodeLog.find(
Q('user', 'eq', user._id) &
Q('date', 'gt', date)
)
def nodes_since(user, date):
return Node.find(
Q('creator', 'eq', user._id) &
Q('date_created', 'gt', date)
)
def process(frame):
frame = frame.copy()
frame['user_id'] = ''
frame['user_logs'] = ''
frame['user_nodes'] = ''
frame['last_log'] = ''
for idx, row in frame.iterrows():
user = (
find_by_email(row['Email address'].strip()) or
find_by_name(row['Name'])
)
if user:
date = parse_date(row['Workshop_date'])
frame.loc[idx, 'user_id'] = user._id
logs = logs_since(user, date)
frame.loc[idx, 'user_logs'] = logs.count()
frame.loc[idx, 'user_nodes'] = nodes_since(user, date).count()
if logs:
frame.loc[idx, 'last_log'] = logs.sort('-date')[0].date.strftime('%c')
return frame
|
<commit_before><commit_msg>Add script to annotate conference RSVP spreadsheets.
Requested by @lbanner.<commit_after>"""Utilities for annotating workshop RSVP data.
Example ::
import pandas as pd
from scripts import annotate_rsvps
frame = pd.read_csv('workshop.csv')
annotated = annotate_rsvps.process(frame)
annotated.to_csv('workshop-annotated.csv')
"""
import re
import logging
from dateutil.parser import parse as parse_date
from modularodm import Q
from modularodm.exceptions import ModularOdmException
from website.models import User, Node, NodeLog
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def find_by_email(email):
try:
return User.find_one(Q('username', 'iexact', email))
except ModularOdmException:
return None
def find_by_name(name):
try:
parts = re.split(r'\s+', name.strip())
except:
return None
if len(parts) < 2:
return None
users = User.find(
reduce(
lambda acc, value: acc & value,
[
Q('fullname', 'icontains', part.decode('utf-8', 'ignore'))
for part in parts
]
)
).sort('-date_created')
if not users:
return None
if len(users) > 1:
logger.warn('Multiple users found for name {}'.format(name))
return users[0]
def logs_since(user, date):
return NodeLog.find(
Q('user', 'eq', user._id) &
Q('date', 'gt', date)
)
def nodes_since(user, date):
return Node.find(
Q('creator', 'eq', user._id) &
Q('date_created', 'gt', date)
)
def process(frame):
frame = frame.copy()
frame['user_id'] = ''
frame['user_logs'] = ''
frame['user_nodes'] = ''
frame['last_log'] = ''
for idx, row in frame.iterrows():
user = (
find_by_email(row['Email address'].strip()) or
find_by_name(row['Name'])
)
if user:
date = parse_date(row['Workshop_date'])
frame.loc[idx, 'user_id'] = user._id
logs = logs_since(user, date)
frame.loc[idx, 'user_logs'] = logs.count()
frame.loc[idx, 'user_nodes'] = nodes_since(user, date).count()
if logs:
frame.loc[idx, 'last_log'] = logs.sort('-date')[0].date.strftime('%c')
return frame
|
|
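find_by_name in the record above folds one icontains filter per name part into a single query with reduce. A standalone sketch of just that folding step, using hypothetical name parts and spelled with functools.reduce so it also runs on Python 3 (where reduce is no longer a builtin):
from functools import reduce

from modularodm import Q

parts = ['Jane', 'Doe']  # hypothetical split of the spreadsheet's Name column
query = reduce(
    lambda acc, value: acc & value,
    [Q('fullname', 'icontains', part) for part in parts],
)
# query now requires the fullname to contain every part, e.g. both 'Jane' and 'Doe'.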
1f17a2a4efaefa0b77eef123dd950e1c047fcb2d
|
scripts/copy_snippet.py
|
scripts/copy_snippet.py
|
#!/usr/bin/env python
"""Copy a snippet (in its latest version) from one party to another.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.snippet.models.snippet import SnippetType
from byceps.services.snippet import service as snippet_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
from bootstrap.validators import validate_party
@click.command()
@click.pass_context
@click.argument('source_party', callback=validate_party)
@click.argument('target_party', callback=validate_party)
@click.argument('snippet_name')
def execute(ctx, source_party, target_party, snippet_name):
snippet_version = snippet_service \
.find_current_version_of_snippet_with_name(
source_party.id, snippet_name)
if snippet_version is None:
raise click.BadParameter('Unknown snippet name "{}" for party "{}".'
.format(snippet_name, source_party.id))
snippet = snippet_version.snippet
if snippet.type_ == SnippetType.document:
snippet_service.create_document(
target_party.id,
snippet.name,
snippet_version.creator_id,
snippet_version.title,
snippet_version.body,
head=snippet_version.head,
image_url_path=snippet_version.image_url_path
)
elif snippet.type_ == SnippetType.fragment:
snippet_service.create_fragment(
target_party.id,
snippet.name,
snippet_version.creator_id,
snippet_version.body
)
else:
ctx.fail("Unknown snippet type '{}'.".format(snippet.type_))
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to copy a snippet from one party to another
|
Add script to copy a snippet from one party to another
|
Python
|
bsd-3-clause
|
m-ober/byceps,m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps
|
Add script to copy a snippet from one party to another
|
#!/usr/bin/env python
"""Copy a snippet (in its latest version) from one party to another.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.snippet.models.snippet import SnippetType
from byceps.services.snippet import service as snippet_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
from bootstrap.validators import validate_party
@click.command()
@click.pass_context
@click.argument('source_party', callback=validate_party)
@click.argument('target_party', callback=validate_party)
@click.argument('snippet_name')
def execute(ctx, source_party, target_party, snippet_name):
snippet_version = snippet_service \
.find_current_version_of_snippet_with_name(
source_party.id, snippet_name)
if snippet_version is None:
raise click.BadParameter('Unknown snippet name "{}" for party "{}".'
.format(snippet_name, source_party.id))
snippet = snippet_version.snippet
if snippet.type_ == SnippetType.document:
snippet_service.create_document(
target_party.id,
snippet.name,
snippet_version.creator_id,
snippet_version.title,
snippet_version.body,
head=snippet_version.head,
image_url_path=snippet_version.image_url_path
)
elif snippet.type_ == SnippetType.fragment:
snippet_service.create_fragment(
target_party.id,
snippet.name,
snippet_version.creator_id,
snippet_version.body
)
else:
ctx.fail("Unknown snippet type '{}'.".format(snippet.type_))
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to copy a snippet from one party to another<commit_after>
|
#!/usr/bin/env python
"""Copy a snippet (in its latest version) from one party to another.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.snippet.models.snippet import SnippetType
from byceps.services.snippet import service as snippet_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
from bootstrap.validators import validate_party
@click.command()
@click.pass_context
@click.argument('source_party', callback=validate_party)
@click.argument('target_party', callback=validate_party)
@click.argument('snippet_name')
def execute(ctx, source_party, target_party, snippet_name):
snippet_version = snippet_service \
.find_current_version_of_snippet_with_name(
source_party.id, snippet_name)
if snippet_version is None:
raise click.BadParameter('Unknown snippet name "{}" for party "{}".'
.format(snippet_name, source_party.id))
snippet = snippet_version.snippet
if snippet.type_ == SnippetType.document:
snippet_service.create_document(
target_party.id,
snippet.name,
snippet_version.creator_id,
snippet_version.title,
snippet_version.body,
head=snippet_version.head,
image_url_path=snippet_version.image_url_path
)
elif snippet.type_ == SnippetType.fragment:
snippet_service.create_fragment(
target_party.id,
snippet.name,
snippet_version.creator_id,
snippet_version.body
)
else:
ctx.fail("Unknown snippet type '{}'.".format(snippet.type_))
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to copy a snippet from one party to another#!/usr/bin/env python
"""Copy a snippet (in its latest version) from one party to another.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.snippet.models.snippet import SnippetType
from byceps.services.snippet import service as snippet_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
from bootstrap.validators import validate_party
@click.command()
@click.pass_context
@click.argument('source_party', callback=validate_party)
@click.argument('target_party', callback=validate_party)
@click.argument('snippet_name')
def execute(ctx, source_party, target_party, snippet_name):
snippet_version = snippet_service \
.find_current_version_of_snippet_with_name(
source_party.id, snippet_name)
if snippet_version is None:
raise click.BadParameter('Unknown snippet name "{}" for party "{}".'
.format(snippet_name, source_party.id))
snippet = snippet_version.snippet
if snippet.type_ == SnippetType.document:
snippet_service.create_document(
target_party.id,
snippet.name,
snippet_version.creator_id,
snippet_version.title,
snippet_version.body,
head=snippet_version.head,
image_url_path=snippet_version.image_url_path
)
elif snippet.type_ == SnippetType.fragment:
snippet_service.create_fragment(
target_party.id,
snippet.name,
snippet_version.creator_id,
snippet_version.body
)
else:
ctx.fail("Unknown snippet type '{}'.".format(snippet.type_))
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to copy a snippet from one party to another<commit_after>#!/usr/bin/env python
"""Copy a snippet (in its latest version) from one party to another.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.snippet.models.snippet import SnippetType
from byceps.services.snippet import service as snippet_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
from bootstrap.validators import validate_party
@click.command()
@click.pass_context
@click.argument('source_party', callback=validate_party)
@click.argument('target_party', callback=validate_party)
@click.argument('snippet_name')
def execute(ctx, source_party, target_party, snippet_name):
snippet_version = snippet_service \
.find_current_version_of_snippet_with_name(
source_party.id, snippet_name)
if snippet_version is None:
raise click.BadParameter('Unknown snippet name "{}" for party "{}".'
.format(snippet_name, source_party.id))
snippet = snippet_version.snippet
if snippet.type_ == SnippetType.document:
snippet_service.create_document(
target_party.id,
snippet.name,
snippet_version.creator_id,
snippet_version.title,
snippet_version.body,
head=snippet_version.head,
image_url_path=snippet_version.image_url_path
)
elif snippet.type_ == SnippetType.fragment:
snippet_service.create_fragment(
target_party.id,
snippet.name,
snippet_version.creator_id,
snippet_version.body
)
else:
ctx.fail("Unknown snippet type '{}'.".format(snippet.type_))
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
|
9991b40871ccd70ee4359b8e44e9a3a5aa15fd8a
|
migrations/versions/0243_another_letter_org.py
|
migrations/versions/0243_another_letter_org.py
|
"""empty message
Revision ID: 0243_another_letter_org
Revises: 0242_template_folders
"""
# revision identifiers, used by Alembic.
revision = '0243_another_letter_org'
down_revision = '0242_template_folders'
from alembic import op
NEW_ORGANISATIONS = [
('516', 'Worcestershire County Council', 'worcestershire'),
('517', 'Buckinghamshire County Council', 'buckinghamshire'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add Buckinghamshire and Worcestershire letter logos
|
Add Buckinghamshire and Worcestershire letter logos
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add Buckinghamshire and Worcestershire letter logos
|
"""empty message
Revision ID: 0243_another_letter_org
Revises: 0242_template_folders
"""
# revision identifiers, used by Alembic.
revision = '0243_another_letter_org'
down_revision = '0242_template_folders'
from alembic import op
NEW_ORGANISATIONS = [
('516', 'Worcestershire County Council', 'worcestershire'),
('517', 'Buckinghamshire County Council', 'buckinghamshire'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add Buckinghamshire and Worcestershire letter logos<commit_after>
|
"""empty message
Revision ID: 0243_another_letter_org
Revises: 0242_template_folders
"""
# revision identifiers, used by Alembic.
revision = '0243_another_letter_org'
down_revision = '0242_template_folders'
from alembic import op
NEW_ORGANISATIONS = [
('516', 'Worcestershire County Council', 'worcestershire'),
('517', 'Buckinghamshire County Council', 'buckinghamshire'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add Buckinghamshire and Worcestershire letter logos"""empty message
Revision ID: 0243_another_letter_org
Revises: 0242_template_folders
"""
# revision identifiers, used by Alembic.
revision = '0243_another_letter_org'
down_revision = '0242_template_folders'
from alembic import op
NEW_ORGANISATIONS = [
('516', 'Worcestershire County Council', 'worcestershire'),
('517', 'Buckinghamshire County Council', 'buckinghamshire'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add Buckinghamshire and Worcestershire letter logos<commit_after>"""empty message
Revision ID: 0243_another_letter_org
Revises: 0242_template_folders
"""
# revision identifiers, used by Alembic.
revision = '0243_another_letter_org'
down_revision = '0242_template_folders'
from alembic import op
NEW_ORGANISATIONS = [
('516', 'Worcestershire County Council', 'worcestershire'),
('517', 'Buckinghamshire County Council', 'buckinghamshire'),
]
def upgrade():
for numeric_id, name, filename in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}', '{}')
""".format(numeric_id, name, filename))
def downgrade():
for numeric_id, _, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
|
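For the first tuple in NEW_ORGANISATIONS, the upgrade loop above therefore issues (whitespace aside) INSERT INTO dvla_organisation VALUES ('516', 'Worcestershire County Council', 'worcestershire'), and the downgrade issues the matching DELETE FROM dvla_organisation WHERE id = '516'.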
ffd021e5effd8264bb8708cb52f17498f3969357
|
scripts/update_names.py
|
scripts/update_names.py
|
from accounts.models import TimtecUser
import re
cont = 0
for user in TimtecUser.objects.all():
complete_name = user.first_name + " " + user.last_name
complete_name = re.sub(' +',' ', complete_name)
if (complete_name != ' '):
user.first_name = complete_name
user.last_name = ''
user.save()
cont = cont + 1
print(str(cont) + " Nomes atualizados")
|
Add script to update names
|
Add script to update names
|
Python
|
agpl-3.0
|
mupi/tecsaladeaula,mupi/tecsaladeaula,mupi/tecsaladeaula,mupi/tecsaladeaula
|
Add script to update names
|
from accounts.models import TimtecUser
import re
cont = 0
for user in TimtecUser.objects.all():
complete_name = user.first_name + " " + user.last_name
complete_name = re.sub(' +',' ', complete_name)
if (complete_name != ' '):
user.first_name = complete_name
user.last_name = ''
user.save()
cont = cont + 1
print(str(cont) + " Nomes atualizados")
|
<commit_before><commit_msg>Add script to update names<commit_after>
|
from accounts.models import TimtecUser
import re
cont = 0
for user in TimtecUser.objects.all():
complete_name = user.first_name + " " + user.last_name
complete_name = re.sub(' +',' ', complete_name)
if (complete_name != ' '):
user.first_name = complete_name
user.last_name = ''
user.save()
cont = cont + 1
print(str(cont) + " Nomes atualizados")
|
Add script to update namesfrom accounts.models import TimtecUser
import re
cont = 0
for user in TimtecUser.objects.all():
complete_name = user.first_name + " " + user.last_name
complete_name = re.sub(' +',' ', complete_name)
if (complete_name != ' '):
user.first_name = complete_name
user.last_name = ''
user.save()
cont = cont + 1
print(str(cont) + " Nomes atualizados")
|
<commit_before><commit_msg>Add script to update names<commit_after>from accounts.models import TimtecUser
import re
cont = 0
for user in TimtecUser.objects.all():
complete_name = user.first_name + " " + user.last_name
complete_name = re.sub(' +',' ', complete_name)
if (complete_name != ' '):
user.first_name = complete_name
user.last_name = ''
user.save()
cont = cont + 1
print(str(cont) + " Nomes atualizados")
|
|
b00d66ebe0248fcaff164997638c8890b5b5c17c
|
tensorflow/python/autograph/utils/compat_util.py
|
tensorflow/python/autograph/utils/compat_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities that don't fit anywhere else."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import sys
import types
import six
class BasicRef(object):
"""This shim emulates the nonlocal keyword in Py2-compatible source."""
def __init__(self, init_value):
self.value = init_value
def deprecated_py2_support(module_name):
"""Swaps calling module with a Py2-specific implementation. Noop in Py3."""
if six.PY2:
legacy_module = importlib.import_module(module_name + '_deprecated_py2')
current_module = sys.modules[module_name]
for name, target_val in legacy_module.__dict__.items():
if isinstance(target_val, types.FunctionType):
replacement = types.FunctionType(
target_val.__code__, current_module.__dict__, target_val.__name__,
target_val.__defaults__, target_val.__closure__)
current_module.__dict__[name] = replacement
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities that don't fit anywhere else."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import sys
import six
class BasicRef(object):
"""This shim emulates the nonlocal keyword in Py2-compatible source."""
def __init__(self, init_value):
self.value = init_value
def deprecated_py2_support(module_name):
"""Swaps calling module with a Py2-specific implementation. Noop in Py3."""
if six.PY2:
legacy_module = importlib.import_module(module_name + '_deprecated_py2')
sys.modules[module_name] = legacy_module
|
Swap out modules in py2 mode in a cleaner fashion.
|
Swap out modules in py2 mode in a cleaner fashion.
PiperOrigin-RevId: 288526813
Change-Id: I86efd4d804c0c873856307cf4a969270eb7bbae8
|
Python
|
apache-2.0
|
annarev/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,xzturn/tensorflow,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,yongtang/tensorflow,cxxgtxy/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,jhseu/tensorflow,karllessard/tensorflow,aldian/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,renyi533/tensorflow,renyi533/tensorflow,gunan/tensorflow,gunan/tensorflow,annarev/tensorflow,sarvex/tensorflow,aldian/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,paolodedios/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,karllessard/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,sarvex/tensorflow,jhseu/tensorflow,davidzchen/tensorflow,cxxgtxy/tensorflow,jhseu/tensorflow,yongtang/tensorflow,xzturn/tensorflow,yongtang/tensorflow,renyi533/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,jhseu/tensorflow,sarvex/tensorflow,annarev/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,gunan/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,annarev/tensorflow,jhseu/tensorflow,davidzchen/tensorflow,annarev/tensorflow,xzturn/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-pywrap_saved_model,davidzchen/tensorflow,yongtang/tensorflow,karllessard/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,sarvex/tensorflow,freedomtan/tensorflow,davidzchen/tensorflow,annarev/tensorflow,annarev/tensorflow,yongtang/tensorflow,gunan/tensorflow,frreiss/tensorflow-fred,aam-at/tensorflow,petewarden/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,freedomtan/tensorflow,davidzchen/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,renyi533/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,aam-at/tensorflow,sarvex/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,petewarden/tensorflow,paolodedios/tensorflow,petewarden/tensorflow,xzturn/tensorflow,gunan/tensorflow,cxxgtxy/tensorflow,gunan/tensorflow,paolodedios/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,aam-at/tensorflow,jhseu/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow,aldian/tensorflow,petewarden/tensorflow,petewarden/tensorflow,renyi533/tensorflow,frreiss/tensorflow-fred,annarev/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,renyi533/tensorflow,Intel-Corporation/tensorflow,jhseu/tensorflow,cxxgtxy/tensorflow,jhseu/tensorflow,xzturn/tensorflow,freedomtan/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,freedomtan/tensorflow,Intel-tensorflow/tensorflow,cxxgtxy/tensorflow,jhseu/tensorflow,paolodedios/tensorflow,aam-at/tensorflow,cxxgtxy/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,jhseu/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,davidzchen/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow,petewarden/tensorflow,x
zturn/tensorflow,gautam1858/tensorflow,renyi533/tensorflow,Intel-tensorflow/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,aam-at/tensorflow,gunan/tensorflow,gautam1858/tensorflow,xzturn/tensorflow,aam-at/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,aldian/tensorflow,karllessard/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_saved_model,aldian/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,freedomtan/tensorflow,gautam1858/tensorflow,gunan/tensorflow,Intel-tensorflow/tensorflow,freedomtan/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,freedomtan/tensorflow,renyi533/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,aam-at/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,annarev/tensorflow,gunan/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,aldian/tensorflow,gautam1858/tensorflow,xzturn/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,renyi533/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,jhseu/tensorflow,frreiss/tensorflow-fred,renyi533/tensorflow,petewarden/tensorflow,freedomtan/tensorflow,aam-at/tensorflow,karllessard/tensorflow,petewarden/tensorflow,yongtang/tensorflow,aldian/tensorflow,gunan/tensorflow,frreiss/tensorflow-fred,jhseu/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gunan/tensorflow,xzturn/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,aam-at/tensorflow,petewarden/tensorflow,freedomtan/tensorflow,petewarden/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,cxxgtxy/tensorflow,petewarden/tensorflow,sarvex/tensorflow,xzturn/tensorflow,renyi533/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,xzturn/tensorflow,aldian/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,renyi533/tensorflow,cxxgtxy/tensorflow,davidzchen/tensorflow,frreiss/tensorflow-fred,aam-at/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,xzturn/tensorflow,tensorflow/tensorflow,davidzchen/tensorflow,gunan/tensorflow
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities that don't fit anywhere else."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import sys
import types
import six
class BasicRef(object):
"""This shim emulates the nonlocal keyword in Py2-compatible source."""
def __init__(self, init_value):
self.value = init_value
def deprecated_py2_support(module_name):
"""Swaps calling module with a Py2-specific implementation. Noop in Py3."""
if six.PY2:
legacy_module = importlib.import_module(module_name + '_deprecated_py2')
current_module = sys.modules[module_name]
for name, target_val in legacy_module.__dict__.items():
if isinstance(target_val, types.FunctionType):
replacement = types.FunctionType(
target_val.__code__, current_module.__dict__, target_val.__name__,
target_val.__defaults__, target_val.__closure__)
current_module.__dict__[name] = replacement
Swap out modules in py2 mode in a cleaner fashion.
PiperOrigin-RevId: 288526813
Change-Id: I86efd4d804c0c873856307cf4a969270eb7bbae8
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities that don't fit anywhere else."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import sys
import six
class BasicRef(object):
"""This shim emulates the nonlocal keyword in Py2-compatible source."""
def __init__(self, init_value):
self.value = init_value
def deprecated_py2_support(module_name):
"""Swaps calling module with a Py2-specific implementation. Noop in Py3."""
if six.PY2:
legacy_module = importlib.import_module(module_name + '_deprecated_py2')
sys.modules[module_name] = legacy_module
|
<commit_before># Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities that don't fit anywhere else."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import sys
import types
import six
class BasicRef(object):
"""This shim emulates the nonlocal keyword in Py2-compatible source."""
def __init__(self, init_value):
self.value = init_value
def deprecated_py2_support(module_name):
"""Swaps calling module with a Py2-specific implementation. Noop in Py3."""
if six.PY2:
legacy_module = importlib.import_module(module_name + '_deprecated_py2')
current_module = sys.modules[module_name]
for name, target_val in legacy_module.__dict__.items():
if isinstance(target_val, types.FunctionType):
replacement = types.FunctionType(
target_val.__code__, current_module.__dict__, target_val.__name__,
target_val.__defaults__, target_val.__closure__)
current_module.__dict__[name] = replacement
<commit_msg>Swap out modules in py2 mode in a cleaner fashion.
PiperOrigin-RevId: 288526813
Change-Id: I86efd4d804c0c873856307cf4a969270eb7bbae8<commit_after>
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities that don't fit anywhere else."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import sys
import six
class BasicRef(object):
"""This shim emulates the nonlocal keyword in Py2-compatible source."""
def __init__(self, init_value):
self.value = init_value
def deprecated_py2_support(module_name):
"""Swaps calling module with a Py2-specific implementation. Noop in Py3."""
if six.PY2:
legacy_module = importlib.import_module(module_name + '_deprecated_py2')
sys.modules[module_name] = legacy_module
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities that don't fit anywhere else."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import sys
import types
import six
class BasicRef(object):
"""This shim emulates the nonlocal keyword in Py2-compatible source."""
def __init__(self, init_value):
self.value = init_value
def deprecated_py2_support(module_name):
"""Swaps calling module with a Py2-specific implementation. Noop in Py3."""
if six.PY2:
legacy_module = importlib.import_module(module_name + '_deprecated_py2')
current_module = sys.modules[module_name]
for name, target_val in legacy_module.__dict__.items():
if isinstance(target_val, types.FunctionType):
replacement = types.FunctionType(
target_val.__code__, current_module.__dict__, target_val.__name__,
target_val.__defaults__, target_val.__closure__)
current_module.__dict__[name] = replacement
Swap out modules in py2 mode in a cleaner fashion.
PiperOrigin-RevId: 288526813
Change-Id: I86efd4d804c0c873856307cf4a969270eb7bbae8# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities that don't fit anywhere else."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import sys
import six
class BasicRef(object):
"""This shim emulates the nonlocal keyword in Py2-compatible source."""
def __init__(self, init_value):
self.value = init_value
def deprecated_py2_support(module_name):
"""Swaps calling module with a Py2-specific implementation. Noop in Py3."""
if six.PY2:
legacy_module = importlib.import_module(module_name + '_deprecated_py2')
sys.modules[module_name] = legacy_module
|
<commit_before># Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities that don't fit anywhere else."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import sys
import types
import six
class BasicRef(object):
"""This shim emulates the nonlocal keyword in Py2-compatible source."""
def __init__(self, init_value):
self.value = init_value
def deprecated_py2_support(module_name):
"""Swaps calling module with a Py2-specific implementation. Noop in Py3."""
if six.PY2:
legacy_module = importlib.import_module(module_name + '_deprecated_py2')
current_module = sys.modules[module_name]
for name, target_val in legacy_module.__dict__.items():
if isinstance(target_val, types.FunctionType):
replacement = types.FunctionType(
target_val.__code__, current_module.__dict__, target_val.__name__,
target_val.__defaults__, target_val.__closure__)
current_module.__dict__[name] = replacement
<commit_msg>Swap out modules in py2 mode in a cleaner fashion.
PiperOrigin-RevId: 288526813
Change-Id: I86efd4d804c0c873856307cf4a969270eb7bbae8<commit_after># Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous utilities that don't fit anywhere else."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import sys
import six
class BasicRef(object):
"""This shim emulates the nonlocal keyword in Py2-compatible source."""
def __init__(self, init_value):
self.value = init_value
def deprecated_py2_support(module_name):
"""Swaps calling module with a Py2-specific implementation. Noop in Py3."""
if six.PY2:
legacy_module = importlib.import_module(module_name + '_deprecated_py2')
sys.modules[module_name] = legacy_module
|
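A minimal usage sketch for the simplified shim above, assuming the helper is importable as misc_util; the calling module and its Py2 twin are hypothetical, only the calling pattern comes from the code in this record.

# hypothetical my_module.py -- its Python 2 twin lives in my_module_deprecated_py2.py
import misc_util  # assumed module name for the helper defined above


def greet(name):
    # Python 3 implementation; my_module_deprecated_py2 carries the Py2 variant.
    return 'hello, ' + name


# Under Python 2 this swaps the whole module for its *_deprecated_py2 twin;
# under Python 3 it is a no-op.
misc_util.deprecated_py2_support(__name__)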
24fe675fa2cedb4cce6596ea46dbed24b31ee072
|
python/fix-srt-subtitle/fix-srt-subtitle.py
|
python/fix-srt-subtitle/fix-srt-subtitle.py
|
#!/usr/bin/env python
import optparse
import sys
def main():
empty_subtitle = '''1
00:00:00,000 --> 00:00:00,001
'''
parser, infile_name, outfile_name = parse_options()
#if len(sys.argv) < 3:
# sys.exit('USAGE: {0} SRT_INFILE SRT_OUTFILE'.format(sys.argv[0]))
# TODO: with contextlib.closing(urllib2.urlopen(url)) as u:
infile = open(infile_name)
outfile = open(outfile_name, 'w')
'''
Add an empty subtitle at the beginning of the file to fix avconv subtitle
offset issues. Also makes sure subtitle numbering starts at 1. Starting at 0
causes avconv to fail importing the subtitle.
'''
outfile.write(empty_subtitle)
# Renumber remaining subtitles
subtitle_number = 2
prev_line = ''
for line in infile:
line = line.strip()
# Optionally reencode subtitles as utf8
if parser.values.ensure_utf8:
try:
line = line.decode('utf8').encode('utf8')
except UnicodeDecodeError:
line = line.decode('latin1').encode('utf8')
if prev_line == '':
line = str(subtitle_number)
subtitle_number += 1
outfile.write('{0}\n'.format(line))
prev_line = line
outfile.close()
infile.close()
def parse_options():
''' set up and parse command line arguments
'''
# define a custom usage message
usage = ('usage: %prog INPUT_FILE OUTPUT_FILE [options]\n'
'\tWhere INPUT_FILE = path to SRT input file\n'
'\tand OUTPUT_FILE = path to SRT output file')
parser = optparse.OptionParser(usage=usage)
# command line options to parse
parser.add_option(
'-u', '--ensure-utf8', action='store_true', dest='ensure_utf8',
default=False, help='Try to ensure the output file is UTF8-encoded'
)
# parse the arguments
(options, args) = parser.parse_args()
if len(args) < 2:
parser.print_help()
sys.exit('Error: INPUT_FILE and OUTPUT_FILE are required')
infile_name = args[0]
outfile_name = args[1]
return parser, infile_name, outfile_name
if __name__ == '__main__':
main()
|
Add script for fixing SRT subtitle files
|
Add script for fixing SRT subtitle files
|
Python
|
mit
|
bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile
|
Add script for fixing SRT subtitle files
|
#!/usr/bin/env python
import optparse
import sys
def main():
empty_subtitle = '''1
00:00:00,000 --> 00:00:00,001
'''
parser, infile_name, outfile_name = parse_options()
#if len(sys.argv) < 3:
# sys.exit('USAGE: {0} SRT_INFILE SRT_OUTFILE'.format(sys.argv[0]))
# TODO: with contextlib.closing(urllib2.urlopen(url)) as u:
infile = open(infile_name)
outfile = open(outfile_name, 'w')
'''
Add an empty subtitle at the beginning of the file to fix avconv subtitle
offset issues. Also makes sure subtitle numbering starts at 1. Starting at 0
causes avconv to fail importing the subtitle.
'''
outfile.write(empty_subtitle)
# Renumber remaining subtitles
subtitle_number = 2
prev_line = ''
for line in infile:
line = line.strip()
# Optionally reencode subtitles as utf8
if parser.values.ensure_utf8:
try:
line = line.decode('utf8').encode('utf8')
except UnicodeDecodeError:
line = line.decode('latin1').encode('utf8')
if prev_line == '':
line = str(subtitle_number)
subtitle_number += 1
outfile.write('{0}\n'.format(line))
prev_line = line
outfile.close()
infile.close()
def parse_options():
''' set up and parse command line arguments
'''
# define a custom usage message
usage = ('usage: %prog INPUT_FILE OUTPUT_FILE [options]\n'
'\tWhere INPUT_FILE = path to SRT input file\n'
'\tand OUTPUT_FILE = path to SRT output file')
parser = optparse.OptionParser(usage=usage)
# command line options to parse
parser.add_option(
'-u', '--ensure-utf8', action='store_true', dest='ensure_utf8',
default=False, help='Try to ensure the output file is UTF8-encoded'
)
# parse the arguments
(options, args) = parser.parse_args()
if len(args) < 2:
parser.print_help()
sys.exit('Error: INPUT_FILE and OUTPUT_FILE are required')
infile_name = args[0]
outfile_name = args[1]
return parser, infile_name, outfile_name
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for fixing SRT subtitle files<commit_after>
|
#!/usr/bin/env python
import optparse
import sys
def main():
empty_subtitle = '''1
00:00:00,000 --> 00:00:00,001
'''
parser, infile_name, outfile_name = parse_options()
#if len(sys.argv) < 3:
# sys.exit('USAGE: {0} SRT_INFILE SRT_OUTFILE'.format(sys.argv[0]))
# TODO: with contextlib.closing(urllib2.urlopen(url)) as u:
infile = open(infile_name)
outfile = open(outfile_name, 'w')
'''
Add an empty subtitle at the beginning of the file to fix avconv subtitle
offset issues. Also makes sure subtitle numbering starts at 1. Starting at 0
causes avconv to fail importing the subtitle.
'''
outfile.write(empty_subtitle)
# Renumber remaining subtitles
subtitle_number = 2
prev_line = ''
for line in infile:
line = line.strip()
# Optionally reencode subtitles as utf8
if parser.values.ensure_utf8:
try:
line = line.decode('utf8').encode('utf8')
except UnicodeDecodeError:
line = line.decode('latin1').encode('utf8')
if prev_line == '':
line = str(subtitle_number)
subtitle_number += 1
outfile.write('{0}\n'.format(line))
prev_line = line
outfile.close()
infile.close()
def parse_options():
''' set up and parse command line arguments
'''
# define a custom usage message
usage = ('usage: %prog INPUT_FILE OUTPUT_FILE [options]\n'
'\tWhere INPUT_FILE = path to SRT input file\n'
'\tand OUTPUT_FILE = path to SRT output file')
parser = optparse.OptionParser(usage=usage)
# command line options to parse
parser.add_option(
'-u', '--ensure-utf8', action='store_true', dest='ensure_utf8',
default=False, help='Try to ensure the output file is UTF8-encoded'
)
# parse the arguments
(options, args) = parser.parse_args()
if len(args) < 2:
parser.print_help()
sys.exit('Error: INPUT_FILE and OUTPUT_FILE are required')
infile_name = args[0]
outfile_name = args[1]
return parser, infile_name, outfile_name
if __name__ == '__main__':
main()
|
Add script for fixing SRT subtitle files#!/usr/bin/env python
import optparse
import sys
def main():
empty_subtitle = '''1
00:00:00,000 --> 00:00:00,001
'''
parser, infile_name, outfile_name = parse_options()
#if len(sys.argv) < 3:
# sys.exit('USAGE: {0} SRT_INFILE SRT_OUTFILE'.format(sys.argv[0]))
# TODO: with contextlib.closing(urllib2.urlopen(url)) as u:
infile = open(infile_name)
outfile = open(outfile_name, 'w')
'''
Add an empty subtitle at the beginning of the file to fix avconv subtitle
offset issues. Also makes sure subtitle numbering starts at 1. Starting at 0
causes avconv to fail importing the subtitle.
'''
outfile.write(empty_subtitle)
# Renumber remaining subtitles
subtitle_number = 2
prev_line = ''
for line in infile:
line = line.strip()
# Optionally reencode subtitles as utf8
if parser.values.ensure_utf8:
try:
line = line.decode('utf8').encode('utf8')
except UnicodeDecodeError:
line = line.decode('latin1').encode('utf8')
if prev_line == '':
line = str(subtitle_number)
subtitle_number += 1
outfile.write('{0}\n'.format(line))
prev_line = line
outfile.close()
infile.close()
def parse_options():
''' set up and parse command line arguments
'''
# define a custom usage message
usage = ('usage: %prog INPUT_FILE OUTPUT_FILE [options]\n'
'\tWhere INPUT_FILE = path to SRT input file\n'
'\tand OUTPUT_FILE = path to SRT output file')
parser = optparse.OptionParser(usage=usage)
# command line options to parse
parser.add_option(
'-u', '--ensure-utf8', action='store_true', dest='ensure_utf8',
default=False, help='Try to ensure the output file is UTF8-encoded'
)
# parse the arguments
(options, args) = parser.parse_args()
if len(args) < 2:
parser.print_help()
sys.exit('Error: INPUT_FILE and OUTPUT_FILE are required')
infile_name = args[0]
outfile_name = args[1]
return parser, infile_name, outfile_name
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for fixing SRT subtitle files<commit_after>#!/usr/bin/env python
import optparse
import sys
def main():
empty_subtitle = '''1
00:00:00,000 --> 00:00:00,001
'''
parser, infile_name, outfile_name = parse_options()
#if len(sys.argv) < 3:
# sys.exit('USAGE: {0} SRT_INFILE SRT_OUTFILE'.format(sys.argv[0]))
# TODO: with contextlib.closing(urllib2.urlopen(url)) as u:
infile = open(infile_name)
outfile = open(outfile_name, 'w')
'''
Add an empty subtitle at the beginning of the file to fix avconv subtitle
offset issues. Also makes sure subtitle numbering starts at 1. Starting at 0
causes avconv to fail importing the subtitle.
'''
outfile.write(empty_subtitle)
# Renumber remaining subtitles
subtitle_number = 2
prev_line = ''
for line in infile:
line = line.strip()
# Optionally reencode subtitles as utf8
if parser.values.ensure_utf8:
try:
line = line.decode('utf8').encode('utf8')
except UnicodeDecodeError:
line = line.decode('latin1').encode('utf8')
if prev_line == '':
line = str(subtitle_number)
subtitle_number += 1
outfile.write('{0}\n'.format(line))
prev_line = line
outfile.close()
infile.close()
def parse_options():
''' set up and parse command line arguments
'''
# define a custom usage message
usage = ('usage: %prog INPUT_FILE OUTPUT_FILE [options]\n'
'\tWhere INPUT_FILE = path to SRT input file\n'
'\tand OUTPUT_FILE = path to SRT output file')
parser = optparse.OptionParser(usage=usage)
# command line options to parse
parser.add_option(
'-u', '--ensure-utf8', action='store_true', dest='ensure_utf8',
default=False, help='Try to ensure the output file is UTF8-encoded'
)
# parse the arguments
(options, args) = parser.parse_args()
if len(args) < 2:
parser.print_help()
sys.exit('Error: INPUT_FILE and OUTPUT_FILE are required')
infile_name = args[0]
outfile_name = args[1]
return parser, infile_name, outfile_name
if __name__ == '__main__':
main()
|
|
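For readers who want the renumbering idea without the file handling, here is a small self-contained sketch (Python 3, illustrative only) that prepends the dummy cue and renumbers the remaining subtitles in memory:

def renumber_srt(lines):
    """Prepend a dummy cue and renumber the remaining cues starting at 2."""
    out = ['1', '00:00:00,000 --> 00:00:00,001', '']
    number = 2
    prev = ''
    for line in lines:
        line = line.strip()
        # A blank previous line marks the start of a cue; its first line is the cue number.
        if prev == '':
            line = str(number)
            number += 1
        out.append(line)
        prev = line
    return out

example = ['7', '00:00:01,000 --> 00:00:02,000', 'Hello there', '',
           '8', '00:00:03,000 --> 00:00:04,000', 'General Kenobi']
print('\n'.join(renumber_srt(example)))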
deda54b8b97399019d1d7378d1e45b223436494e
|
mzalendo/core/management/commands/core_extend_party_memberships.py
|
mzalendo/core/management/commands/core_extend_party_memberships.py
|
# This command is intended to fix issue 550:
#
# https://github.com/mysociety/mzalendo/issues/550
#
# "As mentioned in #494 there are lots of party membership positions
# that have an end date of 2012 - meaning that on the site many
# positions are hidden by default. These party memberships should
# probably be open ended and could be changed to have end dates of
# 'future'"
#
# This script looks for every person in the database, and takes the
# most recent party membership position with the most recent end_date -
# if that position ends with ApproximateDate(2012), change it to
# ApproximateDate(future=True).
from optparse import make_option
import sys
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from core.models import Person
class Command(NoArgsCommand):
help = 'Change party memberships that end in 2012 to end in "future".'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
for person in Person.objects.all():
party_positions = person.position_set.all().filter(title__slug='member').filter(organisation__kind__slug='party')
if not party_positions:
continue
most_recent_party_position = party_positions[0]
if most_recent_party_position.end_date == ApproximateDate(2012):
most_recent_party_position.end_date = ApproximateDate(future=True)
message = "2012 end_date to future for %s" % (most_recent_party_position,)
if options['commit']:
most_recent_party_position.save()
print >> sys.stderr, "Changing " + message
else:
print >> sys.stderr, "Not changing " + message + "because --commit wasn't specified"
|
Add an admin command to fix party membership position end_dates
|
Add an admin command to fix party membership position end_dates
This is a fix for issue #550 - some party membership positions
were set to 2012, so they no longer appear as current
party memberships. Instead, we should set these to 'future'.
We will shortly be importing the new candidate data, in which
lots of these will be ended and new party positions will be
created for a new party.
|
Python
|
agpl-3.0
|
hzj123/56th,mysociety/pombola,geoffkilpin/pombola,patricmutwiri/pombola,mysociety/pombola,geoffkilpin/pombola,patricmutwiri/pombola,hzj123/56th,geoffkilpin/pombola,mysociety/pombola,geoffkilpin/pombola,patricmutwiri/pombola,geoffkilpin/pombola,hzj123/56th,geoffkilpin/pombola,ken-muturi/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola,ken-muturi/pombola,ken-muturi/pombola,hzj123/56th,mysociety/pombola,hzj123/56th,ken-muturi/pombola,ken-muturi/pombola,patricmutwiri/pombola,mysociety/pombola,hzj123/56th,mysociety/pombola
|
Add an admin command to fix party membership position end_dates
This is a fix for issue #550 - some party membership positions
were set to 2012, so they no longer appear as current
party memberships. Instead, we should set these to 'future'.
We will shortly be importing the new candidate data, in which
lots of these will be ended and new party positions will be
created for a new party.
|
# This command is intended to fix issue 550:
#
# https://github.com/mysociety/mzalendo/issues/550
#
# "As mentioned in #494 there are lots of party membership positions
# that have an end date of 2012 - meaning that on the site many
# positions are hidden by default. These party memberships should
# probably be open ended and could be changed to have end dates of
# 'future'"
#
# This script looks for every person in the database, and takes the
# most recent party membership position with the most recent end_date -
# if that position ends with ApproximateDate(2012), change it to
# ApproximateDate(future=True).
from optparse import make_option
import sys
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from core.models import Person
class Command(NoArgsCommand):
help = 'Change party memberships that end in 2012 to end in "future".'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
for person in Person.objects.all():
party_positions = person.position_set.all().filter(title__slug='member').filter(organisation__kind__slug='party')
if not party_positions:
continue
most_recent_party_position = party_positions[0]
if most_recent_party_position.end_date == ApproximateDate(2012):
most_recent_party_position.end_date = ApproximateDate(future=True)
message = "2012 end_date to future for %s" % (most_recent_party_position,)
if options['commit']:
most_recent_party_position.save()
print >> sys.stderr, "Changing " + message
else:
print >> sys.stderr, "Not changing " + message + "because --commit wasn't specified"
|
<commit_before><commit_msg>Add an admin command to fix party membership position end_dates
This is a fix for issue #550 - some party membership positions
were set to 2012, so they no longer appear as current
party memberships. Instead, we should set these to 'future'.
We will shortly be importing the new candidate data, in which
lots of these will be ended and new party positions will be
created for a new party.<commit_after>
|
# This command is intended to fix issue 550:
#
# https://github.com/mysociety/mzalendo/issues/550
#
# "As mentioned in #494 there are lots of party membership positions
# that have an end date of 2012 - meaning that on the site many
# positions are hidden by default. These party memberships should
# probably be open ended and could be changed to have end dates of
# 'future'"
#
# This script looks for every person in the database, and takes the
# most recent party membership position with the most recent end_date -
# if that position ends with ApproximateDate(2012), change it to
# ApproximateDate(future=True).
from optparse import make_option
import sys
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from core.models import Person
class Command(NoArgsCommand):
help = 'Change party memberships that end in 2012 to end in "future".'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
for person in Person.objects.all():
party_positions = person.position_set.all().filter(title__slug='member').filter(organisation__kind__slug='party')
if not party_positions:
continue
most_recent_party_position = party_positions[0]
if most_recent_party_position.end_date == ApproximateDate(2012):
most_recent_party_position.end_date = ApproximateDate(future=True)
message = "2012 end_date to future for %s" % (most_recent_party_position,)
if options['commit']:
most_recent_party_position.save()
print >> sys.stderr, "Changing " + message
else:
print >> sys.stderr, "Not changing " + message + "because --commit wasn't specified"
|
Add an admin command to fix party membership position end_dates
This is a fix for issue #550 - some party membership positions
were set to 2012, so they no longer appear as current
party memberships. Instead, we should set these to 'future'.
We will shortly be importing the new candidate data, in which
lots of these will be ended and new party positions will be
created for a new party.# This command is intended to fix issue 550:
#
# https://github.com/mysociety/mzalendo/issues/550
#
# "As mentioned in #494 there are lots of party membership positions
# that have an end date of 2012 - meaning that on the site many
# positions are hidden by default. These party memberships should
# probably be open ended and could be changed to have end dates of
# 'future'"
#
# This script looks for every person in the database, and takes the
# most recent party membership position with the most recent end_date -
# if that position ends with ApproximateDate(2012), change it to
# ApproximateDate(future=True).
from optparse import make_option
import sys
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from core.models import Person
class Command(NoArgsCommand):
help = 'Change party memberships that end in 2012 to end in "future".'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
for person in Person.objects.all():
party_positions = person.position_set.all().filter(title__slug='member').filter(organisation__kind__slug='party')
if not party_positions:
continue
most_recent_party_position = party_positions[0]
if most_recent_party_position.end_date == ApproximateDate(2012):
most_recent_party_position.end_date = ApproximateDate(future=True)
message = "2012 end_date to future for %s" % (most_recent_party_position,)
if options['commit']:
most_recent_party_position.save()
print >> sys.stderr, "Changing " + message
else:
print >> sys.stderr, "Not changing " + message + "because --commit wasn't specified"
|
<commit_before><commit_msg>Add an admin command to fix party membership position end_dates
This is a fix for issue #550 - some party membership positions
were set to 2012, so they no longer appear as current
party memberships. Instead, we should set these to 'future'.
We will shortly be importing the new candidate data, in which
lots of these will be ended and new party positions will be
created for a new party.<commit_after># This command is intended to fix issue 550:
#
# https://github.com/mysociety/mzalendo/issues/550
#
# "As mentioned in #494 there are lots of party membership positions
# that have an end date of 2012 - meaning that on the site many
# positions are hidden by default. These party memberships should
# probably be open ended and could be changed to have end dates of
# 'future'"
#
# This script looks for every person in the database, and takes the
# most recent party membership position with the most recent end_date -
# if that position ends with ApproximateDate(2012), change it to
# ApproximateDate(future=True).
from optparse import make_option
import sys
from django.core.management.base import NoArgsCommand, CommandError
from django_date_extensions.fields import ApproximateDate
from core.models import Person
class Command(NoArgsCommand):
help = 'Change party memberships that end in 2012 to end in "future".'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
for person in Person.objects.all():
party_positions = person.position_set.all().filter(title__slug='member').filter(organisation__kind__slug='party')
if not party_positions:
continue
most_recent_party_position = party_positions[0]
if most_recent_party_position.end_date == ApproximateDate(2012):
most_recent_party_position.end_date = ApproximateDate(future=True)
message = "2012 end_date to future for %s" % (most_recent_party_position,)
if options['commit']:
most_recent_party_position.save()
print >> sys.stderr, "Changing " + message
else:
print >> sys.stderr, "Not changing " + message + "because --commit wasn't specified"
|
|
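A hedged sketch of the same end_date rewrite on newer Django releases, where NoArgsCommand and option_list no longer exist; the models and slugs are copied from the command above, everything else is illustrative.

from django.core.management.base import BaseCommand
from django_date_extensions.fields import ApproximateDate
from core.models import Person


class Command(BaseCommand):
    help = 'Change party memberships that end in 2012 to end in "future".'

    def add_arguments(self, parser):
        parser.add_argument('--commit', action='store_true', help='Actually update the database')

    def handle(self, *args, **options):
        for person in Person.objects.all():
            positions = person.position_set.filter(
                title__slug='member', organisation__kind__slug='party')
            if not positions:
                continue
            latest = positions[0]
            if latest.end_date == ApproximateDate(2012):
                latest.end_date = ApproximateDate(future=True)
                if options['commit']:
                    latest.save()
                # self.stderr mirrors the print >> sys.stderr calls in the original.
                self.stderr.write('2012 end_date to future for %s' % latest)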
079df6a4908cb03dc5a6ca9b350ffc81b04bdac4
|
smsgateway/factories.py
|
smsgateway/factories.py
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import factory
from smsgateway.models import SMS
class SMSFactory(factory.DjangoModelFactory):
FACTORY_FOR = SMS
content = 'This is a test'
sender = factory.Sequence(lambda n: u"+32476{0:06d}".format(n))
to = factory.Sequence(lambda n: u"+32476{0:06d}".format(n))
|
Add factory for text messages
|
Add factory for text messages
|
Python
|
bsd-3-clause
|
mvpoland/django-smsgateway,mvpoland/django-smsgateway,mvpoland/django-smsgateway
|
Add factory for text messages
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import factory
from smsgateway.models import SMS
class SMSFactory(factory.DjangoModelFactory):
FACTORY_FOR = SMS
content = 'This is a test'
sender = factory.Sequence(lambda n: u"+32476{0:06d}".format(n))
to = factory.Sequence(lambda n: u"+32476{0:06d}".format(n))
|
<commit_before><commit_msg>Add factory for text messages<commit_after>
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import factory
from smsgateway.models import SMS
class SMSFactory(factory.DjangoModelFactory):
FACTORY_FOR = SMS
content = 'This is a test'
sender = factory.Sequence(lambda n: u"+32476{0:06d}".format(n))
to = factory.Sequence(lambda n: u"+32476{0:06d}".format(n))
|
Add factory for text messages# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import factory
from smsgateway.models import SMS
class SMSFactory(factory.DjangoModelFactory):
FACTORY_FOR = SMS
content = 'This is a test'
sender = factory.Sequence(lambda n: u"+32476{0:06d}".format(n))
to = factory.Sequence(lambda n: u"+32476{0:06d}".format(n))
|
<commit_before><commit_msg>Add factory for text messages<commit_after># -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import factory
from smsgateway.models import SMS
class SMSFactory(factory.DjangoModelFactory):
FACTORY_FOR = SMS
content = 'This is a test'
sender = factory.Sequence(lambda n: u"+32476{0:06d}".format(n))
to = factory.Sequence(lambda n: u"+32476{0:06d}".format(n))
|
|
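A hedged usage example for the factory above, showing how a test might lean on the sequence to get unique numbers; the test class and assertions are invented for illustration.

from django.test import TestCase

from smsgateway.factories import SMSFactory


class SMSFactoryUsageTest(TestCase):
    def test_sequence_gives_unique_numbers(self):
        # Each factory call advances the sequence, so sender/to never collide between instances.
        first = SMSFactory()
        second = SMSFactory()
        self.assertNotEqual(first.sender, second.sender)
        self.assertEqual(first.content, 'This is a test')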
8841ed6de25e1b5752ba6bcbd8527820526ccc22
|
tests/test_regex.py
|
tests/test_regex.py
|
import unittest
import importlib
import sublime
from SublimeLinter.lint.linter import get_linter_settings
from SublimeLinter.tests.parameterized import parameterized as p
LinterModule = importlib.import_module('SublimeLinter-annotations.linter')
Linter = LinterModule.Annotations
class TestRegex(unittest.TestCase):
def create_window(self):
sublime.run_command("new_window")
window = sublime.active_window()
self.addCleanup(self.close_window, window)
return window
def close_window(self, window):
window.run_command('close_window')
def create_view(self, window):
view = window.new_file()
self.addCleanup(self.close_view, view)
return view
def close_view(self, view):
view.set_scratch(True)
view.close()
def assertMatch(self, string, expected):
linter = Linter(sublime.View(0), {})
actual = list(linter.find_errors(string))[0]
# `find_errors` fills out more information we don't want to write down
# in the examples
self.assertEqual({k: actual[k] for k in expected.keys()}, expected)
def assertNoMatch(self, string):
linter = Linter(sublime.View(0), {})
actual = list(linter.find_errors(string))
self.assertFalse(actual)
@p.expand([
("# TODO The message", "scope:source.python", {"line": 0, "col": 2, "message": "The message"})
])
def test_a(self, view_content, syntax, expected):
window = self.create_window()
view = self.create_view(window)
view.assign_syntax(syntax)
view.run_command('append', {'characters': view_content})
settings = get_linter_settings(Linter, view, context=None)
linter = Linter(view, settings)
actual = list(linter.find_errors("_ignored by plugin"))[0]
self.assertEqual({k: actual[k] for k in expected.keys()}, expected)
|
Add basic test to prove the concept
|
Add basic test to prove the concept
|
Python
|
mit
|
SublimeLinter/SublimeLinter-annotations
|
Add basic test to prove the concept
|
import unittest
import importlib
import sublime
from SublimeLinter.lint.linter import get_linter_settings
from SublimeLinter.tests.parameterized import parameterized as p
LinterModule = importlib.import_module('SublimeLinter-annotations.linter')
Linter = LinterModule.Annotations
class TestRegex(unittest.TestCase):
def create_window(self):
sublime.run_command("new_window")
window = sublime.active_window()
self.addCleanup(self.close_window, window)
return window
def close_window(self, window):
window.run_command('close_window')
def create_view(self, window):
view = window.new_file()
self.addCleanup(self.close_view, view)
return view
def close_view(self, view):
view.set_scratch(True)
view.close()
def assertMatch(self, string, expected):
linter = Linter(sublime.View(0), {})
actual = list(linter.find_errors(string))[0]
# `find_errors` fills out more information we don't want to write down
# in the examples
self.assertEqual({k: actual[k] for k in expected.keys()}, expected)
def assertNoMatch(self, string):
linter = Linter(sublime.View(0), {})
actual = list(linter.find_errors(string))
self.assertFalse(actual)
@p.expand([
("# TODO The message", "scope:source.python", {"line": 0, "col": 2, "message": "The message"})
])
def test_a(self, view_content, syntax, expected):
window = self.create_window()
view = self.create_view(window)
view.assign_syntax(syntax)
view.run_command('append', {'characters': view_content})
settings = get_linter_settings(Linter, view, context=None)
linter = Linter(view, settings)
actual = list(linter.find_errors("_ignored by plugin"))[0]
self.assertEqual({k: actual[k] for k in expected.keys()}, expected)
|
<commit_before><commit_msg>Add basic test to prove the concept<commit_after>
|
import unittest
import importlib
import sublime
from SublimeLinter.lint.linter import get_linter_settings
from SublimeLinter.tests.parameterized import parameterized as p
LinterModule = importlib.import_module('SublimeLinter-annotations.linter')
Linter = LinterModule.Annotations
class TestRegex(unittest.TestCase):
def create_window(self):
sublime.run_command("new_window")
window = sublime.active_window()
self.addCleanup(self.close_window, window)
return window
def close_window(self, window):
window.run_command('close_window')
def create_view(self, window):
view = window.new_file()
self.addCleanup(self.close_view, view)
return view
def close_view(self, view):
view.set_scratch(True)
view.close()
def assertMatch(self, string, expected):
linter = Linter(sublime.View(0), {})
actual = list(linter.find_errors(string))[0]
# `find_errors` fills out more information we don't want to write down
# in the examples
self.assertEqual({k: actual[k] for k in expected.keys()}, expected)
def assertNoMatch(self, string):
linter = Linter(sublime.View(0), {})
actual = list(linter.find_errors(string))
self.assertFalse(actual)
@p.expand([
("# TODO The message", "scope:source.python", {"line": 0, "col": 2, "message": "The message"})
])
def test_a(self, view_content, syntax, expected):
window = self.create_window()
view = self.create_view(window)
view.assign_syntax(syntax)
view.run_command('append', {'characters': view_content})
settings = get_linter_settings(Linter, view, context=None)
linter = Linter(view, settings)
actual = list(linter.find_errors("_ignored by plugin"))[0]
self.assertEqual({k: actual[k] for k in expected.keys()}, expected)
|
Add basic test to prove the conceptimport unittest
import importlib
import sublime
from SublimeLinter.lint.linter import get_linter_settings
from SublimeLinter.tests.parameterized import parameterized as p
LinterModule = importlib.import_module('SublimeLinter-annotations.linter')
Linter = LinterModule.Annotations
class TestRegex(unittest.TestCase):
def create_window(self):
sublime.run_command("new_window")
window = sublime.active_window()
self.addCleanup(self.close_window, window)
return window
def close_window(self, window):
window.run_command('close_window')
def create_view(self, window):
view = window.new_file()
self.addCleanup(self.close_view, view)
return view
def close_view(self, view):
view.set_scratch(True)
view.close()
def assertMatch(self, string, expected):
linter = Linter(sublime.View(0), {})
actual = list(linter.find_errors(string))[0]
# `find_errors` fills out more information we don't want to write down
# in the examples
self.assertEqual({k: actual[k] for k in expected.keys()}, expected)
def assertNoMatch(self, string):
linter = Linter(sublime.View(0), {})
actual = list(linter.find_errors(string))
self.assertFalse(actual)
@p.expand([
("# TODO The message", "scope:source.python", {"line": 0, "col": 2, "message": "The message"})
])
def test_a(self, view_content, syntax, expected):
window = self.create_window()
view = self.create_view(window)
view.assign_syntax(syntax)
view.run_command('append', {'characters': view_content})
settings = get_linter_settings(Linter, view, context=None)
linter = Linter(view, settings)
actual = list(linter.find_errors("_ignored by plugin"))[0]
self.assertEqual({k: actual[k] for k in expected.keys()}, expected)
|
<commit_before><commit_msg>Add basic test to prove the concept<commit_after>import unittest
import importlib
import sublime
from SublimeLinter.lint.linter import get_linter_settings
from SublimeLinter.tests.parameterized import parameterized as p
LinterModule = importlib.import_module('SublimeLinter-annotations.linter')
Linter = LinterModule.Annotations
class TestRegex(unittest.TestCase):
def create_window(self):
sublime.run_command("new_window")
window = sublime.active_window()
self.addCleanup(self.close_window, window)
return window
def close_window(self, window):
window.run_command('close_window')
def create_view(self, window):
view = window.new_file()
self.addCleanup(self.close_view, view)
return view
def close_view(self, view):
view.set_scratch(True)
view.close()
def assertMatch(self, string, expected):
linter = Linter(sublime.View(0), {})
actual = list(linter.find_errors(string))[0]
# `find_errors` fills out more information we don't want to write down
# in the examples
self.assertEqual({k: actual[k] for k in expected.keys()}, expected)
def assertNoMatch(self, string):
linter = Linter(sublime.View(0), {})
actual = list(linter.find_errors(string))
self.assertFalse(actual)
@p.expand([
("# TODO The message", "scope:source.python", {"line": 0, "col": 2, "message": "The message"})
])
def test_a(self, view_content, syntax, expected):
window = self.create_window()
view = self.create_view(window)
view.assign_syntax(syntax)
view.run_command('append', {'characters': view_content})
settings = get_linter_settings(Linter, view, context=None)
linter = Linter(view, settings)
actual = list(linter.find_errors("_ignored by plugin"))[0]
self.assertEqual({k: actual[k] for k in expected.keys()}, expected)
|
|
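To make the expected line/col/message triples easier to reason about, here is a tiny standalone sketch of the kind of matching the plugin performs; the pattern is a simplified stand-in, not the plugin's actual regex.

import re

# Simplified stand-in: an annotation keyword followed by the rest of the line as the message.
PATTERN = re.compile(r'(?P<word>TODO|FIXME|XXX)[ :]*(?P<message>.*)')


def find_annotations(text):
    """Yield (line, col, message) triples, 0-indexed like the expectations in test_a."""
    for lineno, line in enumerate(text.splitlines()):
        match = PATTERN.search(line)
        if match:
            yield lineno, match.start('word'), match.group('message')


print(list(find_annotations('# TODO The message')))  # [(0, 2, 'The message')]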
82a8e092260d5768babc5d498ac0ddf40c7b3634
|
tests/views/test_admin_committee_page.py
|
tests/views/test_admin_committee_page.py
|
from tests import PMGLiveServerTestCase
from pmg.models import db
from tests.fixtures import (
dbfixture, UserData, CommitteeData, MembershipData
)
class TestAdminCommitteePage(PMGLiveServerTestCase):
def setUp(self):
super(TestAdminCommitteePage, self).setUp()
self.fx = dbfixture.data(
UserData, CommitteeData, MembershipData
)
self.fx.setup()
self.user = self.fx.UserData.admin
def tearDown(self):
self.fx.teardown()
super(TestAdminCommitteePage, self).tearDown()
def test_view_admin_committee_page(self):
"""
Test view admin committee page (http://pmg.test:5000/admin/committee)
"""
self.get_page_contents_as_user(
self.user, "http://pmg.test:5000/admin/committee")
self.assertIn('Committees', self.html)
self.containsCommittee(self.fx.CommitteeData.communications)
self.containsCommittee(self.fx.CommitteeData.arts)
self.containsCommittee(self.fx.CommitteeData.constitutional_review)
def containsCommittee(self, committee):
self.assertIn(committee.name, self.html)
self.assertIn(committee.house.name, self.html)
self.assertIn(str(len(committee.memberships)), self.html)
|
Add admin committee view test
|
Add admin committee view test
|
Python
|
apache-2.0
|
Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2
|
Add admin committee view test
|
from tests import PMGLiveServerTestCase
from pmg.models import db
from tests.fixtures import (
dbfixture, UserData, CommitteeData, MembershipData
)
class TestAdminCommitteePage(PMGLiveServerTestCase):
def setUp(self):
super(TestAdminCommitteePage, self).setUp()
self.fx = dbfixture.data(
UserData, CommitteeData, MembershipData
)
self.fx.setup()
self.user = self.fx.UserData.admin
def tearDown(self):
self.fx.teardown()
super(TestAdminCommitteePage, self).tearDown()
def test_view_admin_committee_page(self):
"""
Test view admin committee page (http://pmg.test:5000/admin/committee)
"""
self.get_page_contents_as_user(
self.user, "http://pmg.test:5000/admin/committee")
self.assertIn('Committees', self.html)
self.containsCommittee(self.fx.CommitteeData.communications)
self.containsCommittee(self.fx.CommitteeData.arts)
self.containsCommittee(self.fx.CommitteeData.constitutional_review)
def containsCommittee(self, committee):
self.assertIn(committee.name, self.html)
self.assertIn(committee.house.name, self.html)
self.assertIn(str(len(committee.memberships)), self.html)
|
<commit_before><commit_msg>Add admin committee view test<commit_after>
|
from tests import PMGLiveServerTestCase
from pmg.models import db
from tests.fixtures import (
dbfixture, UserData, CommitteeData, MembershipData
)
class TestAdminCommitteePage(PMGLiveServerTestCase):
def setUp(self):
super(TestAdminCommitteePage, self).setUp()
self.fx = dbfixture.data(
UserData, CommitteeData, MembershipData
)
self.fx.setup()
self.user = self.fx.UserData.admin
def tearDown(self):
self.fx.teardown()
super(TestAdminCommitteePage, self).tearDown()
def test_view_admin_committee_page(self):
"""
Test view admin committee page (http://pmg.test:5000/admin/committee)
"""
self.get_page_contents_as_user(
self.user, "http://pmg.test:5000/admin/committee")
self.assertIn('Committees', self.html)
self.containsCommittee(self.fx.CommitteeData.communications)
self.containsCommittee(self.fx.CommitteeData.arts)
self.containsCommittee(self.fx.CommitteeData.constitutional_review)
def containsCommittee(self, committee):
self.assertIn(committee.name, self.html)
self.assertIn(committee.house.name, self.html)
self.assertIn(str(len(committee.memberships)), self.html)
|
Add admin committee view testfrom tests import PMGLiveServerTestCase
from pmg.models import db
from tests.fixtures import (
dbfixture, UserData, CommitteeData, MembershipData
)
class TestAdminCommitteePage(PMGLiveServerTestCase):
def setUp(self):
super(TestAdminCommitteePage, self).setUp()
self.fx = dbfixture.data(
UserData, CommitteeData, MembershipData
)
self.fx.setup()
self.user = self.fx.UserData.admin
def tearDown(self):
self.fx.teardown()
super(TestAdminCommitteePage, self).tearDown()
def test_view_admin_committee_page(self):
"""
Test view admin committee page (http://pmg.test:5000/admin/committee)
"""
self.get_page_contents_as_user(
self.user, "http://pmg.test:5000/admin/committee")
self.assertIn('Committees', self.html)
self.containsCommittee(self.fx.CommitteeData.communications)
self.containsCommittee(self.fx.CommitteeData.arts)
self.containsCommittee(self.fx.CommitteeData.constitutional_review)
def containsCommittee(self, committee):
self.assertIn(committee.name, self.html)
self.assertIn(committee.house.name, self.html)
self.assertIn(str(len(committee.memberships)), self.html)
|
<commit_before><commit_msg>Add admin committee view test<commit_after>from tests import PMGLiveServerTestCase
from pmg.models import db
from tests.fixtures import (
dbfixture, UserData, CommitteeData, MembershipData
)
class TestAdminCommitteePage(PMGLiveServerTestCase):
def setUp(self):
super(TestAdminCommitteePage, self).setUp()
self.fx = dbfixture.data(
UserData, CommitteeData, MembershipData
)
self.fx.setup()
self.user = self.fx.UserData.admin
def tearDown(self):
self.fx.teardown()
super(TestAdminCommitteePage, self).tearDown()
def test_view_admin_committee_page(self):
"""
Test view admin committee page (http://pmg.test:5000/admin/committee)
"""
self.get_page_contents_as_user(
self.user, "http://pmg.test:5000/admin/committee")
self.assertIn('Committees', self.html)
self.containsCommittee(self.fx.CommitteeData.communications)
self.containsCommittee(self.fx.CommitteeData.arts)
self.containsCommittee(self.fx.CommitteeData.constitutional_review)
def containsCommittee(self, committee):
self.assertIn(committee.name, self.html)
self.assertIn(committee.house.name, self.html)
self.assertIn(str(len(committee.memberships)), self.html)
|
|
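get_page_contents_as_user comes from the PMGLiveServerTestCase base class and is not shown in this record; a hedged sketch of what such a helper typically does in a live-server test follows (the login URL, form fields, and password are assumptions).

import requests


def get_page_contents_as_user(user, url, login_url='http://pmg.test:5000/security/login'):
    """Log in as `user` and return the HTML of `url` -- illustrative sketch only."""
    session = requests.Session()
    # Flask-Security style form login; field names and password are assumed for the example.
    session.post(login_url, data={'email': user.email, 'password': 'password'})
    response = session.get(url)
    response.raise_for_status()
    return response.text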
dfdbfa7f68a80cbf3976ce9c6d7cb8771e038209
|
conf_site/proposals/migrations/0008_alter_urlfields.py
|
conf_site/proposals/migrations/0008_alter_urlfields.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-01-16 21:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proposals', '0007_add_urlfields'),
]
operations = [
migrations.AlterField(
model_name='proposal',
name='code_url',
field=models.URLField(blank=True, default=b'', help_text=b"Location of this proposal's code repository (e.g. Github).", max_length=2083, verbose_name=b'Repository'),
),
migrations.AlterField(
model_name='proposal',
name='slides_url',
field=models.URLField(blank=True, default=b'', help_text=b'Location of slides for this proposal (e.g. SlideShare, Google Drive).', max_length=2083, verbose_name=b'Slides'),
),
]
|
Add DB migration for previous URL field changes.
|
Add DB migration for previous URL field changes.
See #214.
|
Python
|
mit
|
pydata/conf_site,pydata/conf_site,pydata/conf_site
|
Add DB migration for previous URL field changes.
See #214.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-01-16 21:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proposals', '0007_add_urlfields'),
]
operations = [
migrations.AlterField(
model_name='proposal',
name='code_url',
field=models.URLField(blank=True, default=b'', help_text=b"Location of this proposal's code repository (e.g. Github).", max_length=2083, verbose_name=b'Repository'),
),
migrations.AlterField(
model_name='proposal',
name='slides_url',
field=models.URLField(blank=True, default=b'', help_text=b'Location of slides for this proposal (e.g. SlideShare, Google Drive).', max_length=2083, verbose_name=b'Slides'),
),
]
|
<commit_before><commit_msg>Add DB migration for previous URL field changes.
See #214.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-01-16 21:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proposals', '0007_add_urlfields'),
]
operations = [
migrations.AlterField(
model_name='proposal',
name='code_url',
field=models.URLField(blank=True, default=b'', help_text=b"Location of this proposal's code repository (e.g. Github).", max_length=2083, verbose_name=b'Repository'),
),
migrations.AlterField(
model_name='proposal',
name='slides_url',
field=models.URLField(blank=True, default=b'', help_text=b'Location of slides for this proposal (e.g. SlideShare, Google Drive).', max_length=2083, verbose_name=b'Slides'),
),
]
|
Add DB migration for previous URL field changes.
See #214.# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-01-16 21:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proposals', '0007_add_urlfields'),
]
operations = [
migrations.AlterField(
model_name='proposal',
name='code_url',
field=models.URLField(blank=True, default=b'', help_text=b"Location of this proposal's code repository (e.g. Github).", max_length=2083, verbose_name=b'Repository'),
),
migrations.AlterField(
model_name='proposal',
name='slides_url',
field=models.URLField(blank=True, default=b'', help_text=b'Location of slides for this proposal (e.g. SlideShare, Google Drive).', max_length=2083, verbose_name=b'Slides'),
),
]
|
<commit_before><commit_msg>Add DB migration for previous URL field changes.
See #214.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-01-16 21:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proposals', '0007_add_urlfields'),
]
operations = [
migrations.AlterField(
model_name='proposal',
name='code_url',
field=models.URLField(blank=True, default=b'', help_text=b"Location of this proposal's code repository (e.g. Github).", max_length=2083, verbose_name=b'Repository'),
),
migrations.AlterField(
model_name='proposal',
name='slides_url',
field=models.URLField(blank=True, default=b'', help_text=b'Location of slides for this proposal (e.g. SlideShare, Google Drive).', max_length=2083, verbose_name=b'Slides'),
),
]
|
|
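For context, a rough reconstruction of the model fields that would yield an AlterField migration like this one; the Proposal model itself is not part of the commit, so treat the snippet as an approximation.

from django.db import models


class Proposal(models.Model):
    # Approximate definitions matching the migration above (Django 1.9-era project).
    code_url = models.URLField(
        'Repository', blank=True, default='', max_length=2083,
        help_text="Location of this proposal's code repository (e.g. Github).")
    slides_url = models.URLField(
        'Slides', blank=True, default='', max_length=2083,
        help_text='Location of slides for this proposal (e.g. SlideShare, Google Drive).')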
6cbeab27628b83a0b075d344f46ad8f9207e335e
|
ikea-ota-download.py
|
ikea-ota-download.py
|
#!/usr/bin/env python
"""
Snippet to download current IKEA ZLL OTA files into ~/otau
Requires python 2.7, not compatible with python 3.
"""
import os
import json
import urllib
f = urllib.urlopen("http://fw.ota.homesmart.ikea.net/feed/version_info.json")
data = f.read()
arr = json.loads(data)
otapath = '%s/otau' % os.path.expanduser('~')
if not os.path.exists(otapath):
os.makedirs(otapath)
for i in arr:
if 'fw_binary_url' in i:
url = i['fw_binary_url']
ls = url.split('/')
fname = ls[len(ls) - 1]
path = '%s/%s' % (otapath, fname)
if not os.path.isfile(path):
urllib.urlretrieve(url, path)
print(path)
else:
print('%s already exists' % fname)
|
Add IKEA OTA download script
|
Add IKEA OTA download script
|
Python
|
bsd-3-clause
|
dresden-elektronik/deconz-rest-plugin,dresden-elektronik/deconz-rest-plugin,dresden-elektronik/deconz-rest-plugin,dresden-elektronik/deconz-rest-plugin,dresden-elektronik/deconz-rest-plugin
|
Add IKEA OTA download script
|
#!/usr/bin/env python
"""
Snippet to download current IKEA ZLL OTA files into ~/otau
Requires python 2.7, not compatible with python 3.
"""
import os
import json
import urllib
f = urllib.urlopen("http://fw.ota.homesmart.ikea.net/feed/version_info.json")
data = f.read()
arr = json.loads(data)
otapath = '%s/otau' % os.path.expanduser('~')
if not os.path.exists(otapath):
os.makedirs(otapath)
for i in arr:
if 'fw_binary_url' in i:
url = i['fw_binary_url']
ls = url.split('/')
fname = ls[len(ls) - 1]
path = '%s/%s' % (otapath, fname)
if not os.path.isfile(path):
urllib.urlretrieve(url, path)
print(path)
else:
print('%s already exists' % fname)
|
<commit_before><commit_msg>Add IKEA OTA download script<commit_after>
|
#!/usr/bin/env python
"""
Snippet to download current IKEA ZLL OTA files into ~/otau
Requires python 2.7, not compatible with python 3.
"""
import os
import json
import urllib
f = urllib.urlopen("http://fw.ota.homesmart.ikea.net/feed/version_info.json")
data = f.read()
arr = json.loads(data)
otapath = '%s/otau' % os.path.expanduser('~')
if not os.path.exists(otapath):
os.makedirs(otapath)
for i in arr:
if 'fw_binary_url' in i:
url = i['fw_binary_url']
ls = url.split('/')
fname = ls[len(ls) - 1]
path = '%s/%s' % (otapath, fname)
if not os.path.isfile(path):
urllib.urlretrieve(url, path)
print(path)
else:
print('%s already exists' % fname)
|
Add IKEA OTA download script#!/usr/bin/env python
"""
Snippet to download current IKEA ZLL OTA files into ~/otau
Requires python 2.7, not compatible with python 3.
"""
import os
import json
import urllib
f = urllib.urlopen("http://fw.ota.homesmart.ikea.net/feed/version_info.json")
data = f.read()
arr = json.loads(data)
otapath = '%s/otau' % os.path.expanduser('~')
if not os.path.exists(otapath):
os.makedirs(otapath)
for i in arr:
if 'fw_binary_url' in i:
url = i['fw_binary_url']
ls = url.split('/')
fname = ls[len(ls) - 1]
path = '%s/%s' % (otapath, fname)
if not os.path.isfile(path):
urllib.urlretrieve(url, path)
print(path)
else:
print('%s already exists' % fname)
|
<commit_before><commit_msg>Add IKEA OTA download script<commit_after>#!/usr/bin/env python
"""
Snippet to download current IKEA ZLL OTA files into ~/otau
Requires python 2.7, not compatible with python 3.
"""
import os
import json
import urllib
f = urllib.urlopen("http://fw.ota.homesmart.ikea.net/feed/version_info.json")
data = f.read()
arr = json.loads(data)
otapath = '%s/otau' % os.path.expanduser('~')
if not os.path.exists(otapath):
os.makedirs(otapath)
for i in arr:
if 'fw_binary_url' in i:
url = i['fw_binary_url']
ls = url.split('/')
fname = ls[len(ls) - 1]
path = '%s/%s' % (otapath, fname)
if not os.path.isfile(path):
urllib.urlretrieve(url, path)
print(path)
else:
print('%s already exists' % fname)
|
|
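Since the script above is explicitly Python-2-only, a hedged Python 3 equivalent using urllib.request is sketched below; the feed URL is copied from the script, the rest is a direct port.

#!/usr/bin/env python3
"""Python 3 sketch of the OTA downloader above."""
import json
import os
import urllib.request

FEED_URL = 'http://fw.ota.homesmart.ikea.net/feed/version_info.json'


def main():
    with urllib.request.urlopen(FEED_URL) as response:
        entries = json.loads(response.read().decode('utf-8'))
    otapath = os.path.join(os.path.expanduser('~'), 'otau')
    os.makedirs(otapath, exist_ok=True)
    for entry in entries:
        url = entry.get('fw_binary_url')
        if not url:
            continue
        path = os.path.join(otapath, url.rsplit('/', 1)[-1])
        if os.path.isfile(path):
            print('%s already exists' % os.path.basename(path))
        else:
            urllib.request.urlretrieve(url, path)
            print(path)


if __name__ == '__main__':
    main()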
52d312ad6bcfd68eb88202ef40574f10788eb70b
|
leetcode/reverse_string.py
|
leetcode/reverse_string.py
|
"""
# Problem statement
https://leetcode.com/explore/interview/card/top-interview-questions-easy/127/strings/879/
## Algorithm description
Traverse the given input until the middle of it.
Swap the extremes of the input until reaching the middle of it.
Example:
input = "abcde"
len(input) = 5
middle = ceil(5 / 2) = 3
ii = initial_index
fi = final_index
i = 0, fi = 4, "abcde"
i = 1, fi = 3, "ebcda"
i = 2, fi = 2, "edcba"
This works for odd and even inputs.
### Cases
I considered the following cases:
1. empty: "" -> ""
1. one: "a" -> "a"
1. String length is odd: "abc" -> "cba"
1. String length is even: "abcd" -> "dcba"
### Examples:
"abcde" -> l: 5 -> int(5 / 2) = 3 ->
i = 0 < 3
ii
0, 5 - 1 - 0 = 4
1, 5 - 1 - 1 = 3
2, 5 - 1 - 2 = 2
"abcd" -> l: 4 -> int(4 / 2) = 2 ->
i = 0 < 2
ii
0, 4 - 1 - 0 = 3
1, 4 - 1 - 1 = 2
## Complexity
### Time
1. Traverse the given input until the middle of it: O(n)
1. Swap elements of the input: O(1)
Total = O(n) + O(1) = O(n)
### Space
Only simple variables were created: O(1)
## To improve
I prefer to avoid mutating data structures, so I would have preferred to create
a new array to store the answer and then return it.
I mutated the given input because that was a constraint given in the problem
statement:
> Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
"""
import math
class Solution:
def reverseString(self, string: [str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
length = len(string)
if length <= 1:
return
limit = math.ceil(length / 2)
for index in range(limit):
final_index = length - 1 - index
string[index], string[final_index] = string[final_index], string[index]
if __name__ == "__main__":
solution = Solution()
string = ["H","a","n","n","a","h"]
solution.reverseString(string)
print(string)
|
Add solution for Reverse String problem
|
Add solution for Reverse String problem
|
Python
|
mit
|
julianespinel/training,julianespinel/training,julianespinel/training,julianespinel/trainning,julianespinel/trainning,julianespinel/training
|
Add solution for Reverse String problem
|
"""
# Problem statement
https://leetcode.com/explore/interview/card/top-interview-questions-easy/127/strings/879/
## Algorithm description
Traverse the given input until the middle of it.
Swap the extremes of the input until reaching the middle of it.
Example:
input = "abcde"
len(input) = 5
middle = ceil(5 / 2) = 3
ii = initial_index
fi = final_index
i = 0, fi = 4, "abcde"
i = 1, fi = 3, "ebcda"
i = 2, fi = 2, "edcba"
This works for odd and even inputs.
### Cases
I considered the following cases:
1. empty: "" -> ""
1. one: "a" -> "a"
1. String length is odd: "abc" -> "cba"
1. String length is even: "abcd" -> "dcba"
### Examples:
"abcde" -> l: 5 -> int(5 / 2) = 3 ->
i = 0 < 3
ii
0, 5 - 1 - 0 = 4
1, 5 - 1 - 1 = 3
2, 5 - 1 - 2 = 2
"abcd" -> l: 4 -> int(4 / 2) = 2 ->
i = 0 < 2
ii
0, 4 - 1 - 0 = 3
1, 4 - 1 - 1 = 2
## Complexity
### Time
1. Traverse the given input until the middle of it: O(n)
1. Swap elements of the input: O(1)
Total = O(n) + O(1) = O(n)
### Space
Only simple variables were created: O(1)
## To improve
I prefer to avoid mutating data structures, so I would have preferred to create
a new array to store the answer and then return it.
I mutated the given input because that was a constraint given in the problem
statement:
> Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
"""
import math
class Solution:
def reverseString(self, string: [str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
length = len(string)
if length <= 1:
return
limit = math.ceil(length / 2)
for index in range(limit):
final_index = length - 1 - index
string[index], string[final_index] = string[final_index], string[index]
if __name__ == "__main__":
solution = Solution()
string = ["H","a","n","n","a","h"]
solution.reverseString(string)
print(string)
|
<commit_before><commit_msg>Add solution for Reverse String problem<commit_after>
|
"""
# Problem statement
https://leetcode.com/explore/interview/card/top-interview-questions-easy/127/strings/879/
## Algorithm description
Traverse the given input until the middle of it.
Swap the extremes of the input until reaching the middle of it.
Example:
input = "abcde"
len(input) = 5
middle = ceil(5 / 2) = 3
ii = initial_index
fi = final_index
i = 0, fi = 4, "abcde"
i = 1, fi = 3, "ebcda"
i = 2, fi = 2, "edcba"
This works for odd and even inputs.
### Cases
I considered the following cases:
1. empty: "" -> ""
1. one: "a" -> "a"
1. String length is odd: "abc" -> "cba"
1. String length is even: "abcd" -> "dcba"
### Examples:
"abcde" -> l: 5 -> int(5 / 2) = 3 ->
i = 0 < 3
ii
0, 5 - 1 - 0 = 4
1, 5 - 1 - 1 = 3
2, 5 - 1 - 2 = 2
"abcd" -> l: 4 -> int(4 / 2) = 2 ->
i = 0 < 2
ii
0, 4 - 1 - 0 = 3
1, 4 - 1 - 1 = 2
## Complexity
### Time
1. Traverse the given input until the middle of it: O(n)
1. Swap elements of the input: O(1)
Total = O(n) + O(1) = O(n)
### Space
Only simple variables were created: O(1)
## To improve
I prefer to avoid mutating data structures, so I would have preferred to create
a new array to store the answer and then return it.
I mutated the given input because that was a constraint given in the problem
statement:
> Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
"""
import math
class Solution:
def reverseString(self, string: [str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
length = len(string)
if length <= 1:
return
limit = math.ceil(length / 2)
for index in range(limit):
final_index = length - 1 - index
string[index], string[final_index] = string[final_index], string[index]
if __name__ == "__main__":
solution = Solution()
string = ["H","a","n","n","a","h"]
solution.reverseString(string)
print(string)
|
Add solution for Reverse String problem"""
# Problem statement
https://leetcode.com/explore/interview/card/top-interview-questions-easy/127/strings/879/
## Algorithm description
Traverse the given input until the middle of it.
Swap the extremes of the input until reaching the middle of it.
Example:
input = "abcde"
len(input) = 5
middle = ceil(5 / 2) = 3
ii = initial_index
fi = final_index
i = 0, fi = 4, "abcde"
i = 1, fi = 3, "ebcda"
i = 2, fi = 2, "edcba"
This works for odd and even inputs.
### Cases
I considered the following cases:
1. empty: "" -> ""
1. one: "a" -> "a"
1. String length is odd: "abc" -> "cba"
1. String length is even: "abcd" -> "dcba"
### Examples:
"abcde" -> l: 5 -> int(5 / 2) = 3 ->
i = 0 < 3
ii
0, 5 - 1 - 0 = 4
1, 5 - 1 - 1 = 3
2, 5 - 1 - 2 = 2
"abcd" -> l: 4 -> int(4 / 2) = 2 ->
i = 0 < 2
ii
0, 4 - 1 - 0 = 3
1, 4 - 1 - 1 = 2
## Complexity
### Time
1. Traverse the given input until the middle of it: O(n)
1. Swap elements of the input: O(1)
Total = O(n) + O(1) = O(n)
### Space
Only simple variables were created: O(1)
## To improve
I prefer to avoid mutating data structures, so I would have preferred to create
a new array to store the answer and then return it.
I mutated the given input because that was a constraint given in the problem
statement:
> Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
"""
import math
class Solution:
def reverseString(self, string: [str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
length = len(string)
if length <= 1:
return
limit = math.ceil(length / 2)
for index in range(limit):
final_index = length - 1 - index
string[index], string[final_index] = string[final_index], string[index]
if __name__ == "__main__":
solution = Solution()
string = ["H","a","n","n","a","h"]
solution.reverseString(string)
print(string)
|
<commit_before><commit_msg>Add solution for Reverse String problem<commit_after>"""
# Problem statement
https://leetcode.com/explore/interview/card/top-interview-questions-easy/127/strings/879/
## Algorithm description
Traverse the given input until the middle of it.
Swap the extremes of the input until reaching the middle of it.
Example:
input = "abcde"
len(input) = 5
middle = ceil(5 / 2) = 3
ii = initial_index
fi = final_index
i = 0, fi = 4, "abcde"
i = 1, fi = 3, "ebcda"
i = 2, fi = 2, "edcba"
This works for odd and even inputs.
### Cases
I considered the following cases:
1. empty: "" -> ""
1. one: "a" -> "a"
1. String length is odd: "abc" -> "cba"
1. String length is even: "abcd" -> "dcba"
### Examples:
"abcde" -> l: 5 -> int(5 / 2) = 3 ->
i = 0 < 3
ii
0, 5 - 1 - 0 = 4
1, 5 - 1 - 1 = 3
2, 5 - 1 - 2 = 2
"abcd" -> l: 4 -> int(4 / 2) = 2 ->
i = 0 < 2
ii
0, 4 - 1 - 0 = 3
1, 4 - 1 - 1 = 2
## Complexity
### Time
1. Traverse the given input until the middle of it: O(n)
1. Swap elements of the input: O(1)
Total = O(n) + O(1) = O(n)
### Space
Only simple variables were created: O(1)
## To improve
I prefer to avoid mutating data structures, so I would have preferred to create
a new array to store the answer and then return it.
I mutated the given input because that was a constraint given in the problem
statement:
> Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
"""
import math
class Solution:
def reverseString(self, string: [str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
length = len(string)
if length <= 1:
return
limit = math.ceil(length / 2)
for index in range(limit):
final_index = length - 1 - index
string[index], string[final_index] = string[final_index], string[index]
if __name__ == "__main__":
solution = Solution()
string = ["H","a","n","n","a","h"]
solution.reverseString(string)
print(string)
|
|
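A couple of quick checks covering the cases listed in the docstring; the helper repeats the same swap logic so the snippet runs on its own, and is illustrative rather than part of the submission.

import math


def reverse_in_place(chars):
    """Mirror of Solution.reverseString above, repeated here so the checks are self-contained."""
    length = len(chars)
    if length <= 1:
        return chars
    for index in range(math.ceil(length / 2)):
        final_index = length - 1 - index
        chars[index], chars[final_index] = chars[final_index], chars[index]
    return chars


# Cases from the docstring: empty, single element, odd length, even length.
assert reverse_in_place([]) == []
assert reverse_in_place(['a']) == ['a']
assert reverse_in_place(list('abc')) == list('cba')
assert reverse_in_place(list('abcd')) == list('dcba')
print('all cases pass')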
093c29e315ca0a1eb5efc3099892ceea016ffde1
|
awx/main/management/commands/revoke_tokens.py
|
awx/main/management/commands/revoke_tokens.py
|
# Django
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
# AWX
from awx.main.models.oauth import OAuth2AccessToken
from oauth2_provider.models import RefreshToken
def revoke_tokens(token_list):
for token in token_list:
token.revoke()
class Command(BaseCommand):
"""Command that revokes OAuth2 tokens and refresh tokens."""
help='Revokes OAuth2 tokens and refresh tokens.'
def add_arguments(self, parser):
parser.add_argument('--user', dest='user', type=str)
parser.add_argument('--revoke_refresh', dest='revoke_refresh', action='store_true')
def handle(self, *args, **options):
if not options['user']:
if options['revoke_refresh']:
revoke_tokens(RefreshToken.objects.all())
revoke_tokens(OAuth2AccessToken.objects.all())
else:
try:
user = User.objects.get(username=options['user'])
except ObjectDoesNotExist:
raise CommandError('The user does not exist.')
if options['revoke_refresh']:
revoke_tokens(RefreshToken.objects.filter(user=user))
revoke_tokens(user.main_oauth2accesstoken.filter(user=user))
|
Add command to revoke tokens
|
Add command to revoke tokens
Signed-off-by: Cristian Vargas <87b43435284539b2f7f4e4dcab5e78251c243226@swapps.co>
|
Python
|
apache-2.0
|
wwitzel3/awx,wwitzel3/awx,wwitzel3/awx,wwitzel3/awx
|
Add command to revoke tokens
Signed-off-by: Cristian Vargas <87b43435284539b2f7f4e4dcab5e78251c243226@swapps.co>
|
# Django
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
# AWX
from awx.main.models.oauth import OAuth2AccessToken
from oauth2_provider.models import RefreshToken
def revoke_tokens(token_list):
for token in token_list:
token.revoke()
class Command(BaseCommand):
"""Command that revokes OAuth2 tokens and refresh tokens."""
help='Revokes OAuth2 tokens and refresh tokens.'
def add_arguments(self, parser):
parser.add_argument('--user', dest='user', type=str)
parser.add_argument('--revoke_refresh', dest='revoke_refresh', action='store_true')
def handle(self, *args, **options):
if not options['user']:
if options['revoke_refresh']:
revoke_tokens(RefreshToken.objects.all())
revoke_tokens(OAuth2AccessToken.objects.all())
else:
try:
user = User.objects.get(username=options['user'])
except ObjectDoesNotExist:
raise CommandError('The user does not exist.')
if options['revoke_refresh']:
revoke_tokens(RefreshToken.objects.filter(user=user))
revoke_tokens(user.main_oauth2accesstoken.filter(user=user))
|
<commit_before><commit_msg>Add command to revoke tokens
Signed-off-by: Cristian Vargas <87b43435284539b2f7f4e4dcab5e78251c243226@swapps.co><commit_after>
|
# Django
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
# AWX
from awx.main.models.oauth import OAuth2AccessToken
from oauth2_provider.models import RefreshToken
def revoke_tokens(token_list):
for token in token_list:
token.revoke()
class Command(BaseCommand):
"""Command that revokes OAuth2 tokens and refresh tokens."""
help='Revokes OAuth2 tokens and refresh tokens.'
def add_arguments(self, parser):
parser.add_argument('--user', dest='user', type=str)
parser.add_argument('--revoke_refresh', dest='revoke_refresh', action='store_true')
def handle(self, *args, **options):
if not options['user']:
if options['revoke_refresh']:
revoke_tokens(RefreshToken.objects.all())
revoke_tokens(OAuth2AccessToken.objects.all())
else:
try:
user = User.objects.get(username=options['user'])
except ObjectDoesNotExist:
raise CommandError('The user does not exist.')
if options['revoke_refresh']:
revoke_tokens(RefreshToken.objects.filter(user=user))
revoke_tokens(user.main_oauth2accesstoken.filter(user=user))
|
Add command to revoke tokens
Signed-off-by: Cristian Vargas <87b43435284539b2f7f4e4dcab5e78251c243226@swapps.co># Django
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
# AWX
from awx.main.models.oauth import OAuth2AccessToken
from oauth2_provider.models import RefreshToken
def revoke_tokens(token_list):
for token in token_list:
token.revoke()
class Command(BaseCommand):
"""Command that revokes OAuth2 tokens and refresh tokens."""
help='Revokes OAuth2 tokens and refresh tokens.'
def add_arguments(self, parser):
parser.add_argument('--user', dest='user', type=str)
parser.add_argument('--revoke_refresh', dest='revoke_refresh', action='store_true')
def handle(self, *args, **options):
if not options['user']:
if options['revoke_refresh']:
revoke_tokens(RefreshToken.objects.all())
revoke_tokens(OAuth2AccessToken.objects.all())
else:
try:
user = User.objects.get(username=options['user'])
except ObjectDoesNotExist:
raise CommandError('The user does not exist.')
if options['revoke_refresh']:
revoke_tokens(RefreshToken.objects.filter(user=user))
revoke_tokens(user.main_oauth2accesstoken.filter(user=user))
|
<commit_before><commit_msg>Add command to revoke tokens
Signed-off-by: Cristian Vargas <87b43435284539b2f7f4e4dcab5e78251c243226@swapps.co><commit_after># Django
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
# AWX
from awx.main.models.oauth import OAuth2AccessToken
from oauth2_provider.models import RefreshToken
def revoke_tokens(token_list):
for token in token_list:
token.revoke()
class Command(BaseCommand):
"""Command that revokes OAuth2 tokens and refresh tokens."""
help='Revokes OAuth2 tokens and refresh tokens.'
def add_arguments(self, parser):
parser.add_argument('--user', dest='user', type=str)
parser.add_argument('--revoke_refresh', dest='revoke_refresh', action='store_true')
def handle(self, *args, **options):
if not options['user']:
if options['revoke_refresh']:
revoke_tokens(RefreshToken.objects.all())
revoke_tokens(OAuth2AccessToken.objects.all())
else:
try:
user = User.objects.get(username=options['user'])
except ObjectDoesNotExist:
raise CommandError('The user does not exist.')
if options['revoke_refresh']:
revoke_tokens(RefreshToken.objects.filter(user=user))
revoke_tokens(user.main_oauth2accesstoken.filter(user=user))
|
|
2af5a573196d409cb70b58fe828de9515dc4088a
|
bin/fetch_mcgill_transcript.py
|
bin/fetch_mcgill_transcript.py
|
#!/usr/bin/env python
import argparse
import getpass
import sys
import StringIO
import mcgill
def parse_args():
parser = argparse.ArgumentParser(
description='fetch McGill transcript',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-a', '--auth', help='''email:password or sid:pin
If the password (or pin) is omitted, will prompt for them.
If the argument is omitted altogether, will look to the values of the
MCGILL_SID and MCGILL_PIN environment variables.
''')
return parser.parse_args()
def get_user_credentials():
args = parse_args()
if args.auth is None:
return None, None
if ':' in args.auth:
return args.auth.split(':', 1)
try:
password = getpass.getpass("Password for user '%s': " % args.auth)
except (EOFError, KeyboardInterrupt):
sys.stderr.write('\n')
sys.exit(0)
return args.auth, password
# TODO(isbadawi): maybe extract some of the logic here to work on
# generic tabular data (as e.g. lists of lists)?
def format_transcript(transcript):
def term_key(term):
semester, year = term.semester.split()
return year, {'Winter': 0, 'Summer': 1, 'Fall': 2}[semester]
buf = StringIO.StringIO()
longest_subject_len = max(len(course.subject)
for course in transcript.get_courses())
longest_title_len = max(len(course.title)
for course in transcript.get_courses())
for term in sorted(transcript.terms.values(), key=term_key):
buf.write(term.semester)
if term.gpa is not None and term.cum_gpa is not None:
buf.write(' (GPA: %s, cumulative: %s)' % (term.gpa, term.cum_gpa))
buf.write('\n')
for course in term.courses:
buf.write('\t')
buf.write('\t'.join([
course.subject.ljust(longest_subject_len),
course.title.ljust(longest_title_len),
str(course.credits),
(course.grade or '').ljust(2),
(course.average or '').ljust(2)]))
buf.write('\n')
return buf.getvalue()
def main():
username, password = get_user_credentials()
client = mcgill.login(username, password)
transcript = client.transcript()
print format_transcript(transcript)
if __name__ == '__main__':
main()
|
Add a quick script to dump transcript to stdout.
|
Add a quick script to dump transcript to stdout.
|
Python
|
mit
|
isbadawi/minerva
|
Add a quick script to dump transcript to stdout.
|
#!/usr/bin/env python
import argparse
import getpass
import sys
import StringIO
import mcgill
def parse_args():
parser = argparse.ArgumentParser(
description='fetch McGill transcript',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-a', '--auth', help='''email:password or sid:pin
If the password (or pin) is omitted, will prompt for them.
If the argument is omitted altogether, will look to the values of the
MCGILL_SID and MCGILL_PIN environment variables.
''')
return parser.parse_args()
def get_user_credentials():
args = parse_args()
if args.auth is None:
return None, None
if ':' in args.auth:
return args.auth.split(':', 1)
try:
password = getpass.getpass("Password for user '%s': " % args.auth)
except (EOFError, KeyboardInterrupt):
sys.stderr.write('\n')
sys.exit(0)
return args.auth, password
# TODO(isbadawi): maybe extract some of the logic here to work on
# generic tabular data (as e.g. lists of lists)?
def format_transcript(transcript):
def term_key(term):
semester, year = term.semester.split()
return year, {'Winter': 0, 'Summer': 1, 'Fall': 2}[semester]
buf = StringIO.StringIO()
longest_subject_len = max(len(course.subject)
for course in transcript.get_courses())
longest_title_len = max(len(course.title)
for course in transcript.get_courses())
for term in sorted(transcript.terms.values(), key=term_key):
buf.write(term.semester)
if term.gpa is not None and term.cum_gpa is not None:
buf.write(' (GPA: %s, cumulative: %s)' % (term.gpa, term.cum_gpa))
buf.write('\n')
for course in term.courses:
buf.write('\t')
buf.write('\t'.join([
course.subject.ljust(longest_subject_len),
course.title.ljust(longest_title_len),
str(course.credits),
(course.grade or '').ljust(2),
(course.average or '').ljust(2)]))
buf.write('\n')
return buf.getvalue()
def main():
username, password = get_user_credentials()
client = mcgill.login(username, password)
transcript = client.transcript()
print format_transcript(transcript)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a quick script to dump transcript to stdout.<commit_after>
|
#!/usr/bin/env python
import argparse
import getpass
import sys
import StringIO
import mcgill
def parse_args():
parser = argparse.ArgumentParser(
description='fetch McGill transcript',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-a', '--auth', help='''email:password or sid:pin
If the password (or pin) is omitted, will prompt for them.
If the argument is omitted altogether, will look to the values of the
MCGILL_SID and MCGILL_PIN environment variables.
''')
return parser.parse_args()
def get_user_credentials():
args = parse_args()
if args.auth is None:
return None, None
if ':' in args.auth:
return args.auth.split(':', 1)
try:
password = getpass.getpass("Password for user '%s': " % args.auth)
except (EOFError, KeyboardInterrupt):
sys.stderr.write('\n')
sys.exit(0)
return args.auth, password
# TODO(isbadawi): maybe extract some of the logic here to work on
# generic tabular data (as e.g. lists of lists)?
def format_transcript(transcript):
def term_key(term):
semester, year = term.semester.split()
return year, {'Winter': 0, 'Summer': 1, 'Fall': 2}[semester]
buf = StringIO.StringIO()
longest_subject_len = max(len(course.subject)
for course in transcript.get_courses())
longest_title_len = max(len(course.title)
for course in transcript.get_courses())
for term in sorted(transcript.terms.values(), key=term_key):
buf.write(term.semester)
if term.gpa is not None and term.cum_gpa is not None:
buf.write(' (GPA: %s, cumulative: %s)' % (term.gpa, term.cum_gpa))
buf.write('\n')
for course in term.courses:
buf.write('\t')
buf.write('\t'.join([
course.subject.ljust(longest_subject_len),
course.title.ljust(longest_title_len),
str(course.credits),
(course.grade or '').ljust(2),
(course.average or '').ljust(2)]))
buf.write('\n')
return buf.getvalue()
def main():
username, password = get_user_credentials()
client = mcgill.login(username, password)
transcript = client.transcript()
print format_transcript(transcript)
if __name__ == '__main__':
main()
|
Add a quick script to dump transcript to stdout.#!/usr/bin/env python
import argparse
import getpass
import sys
import StringIO
import mcgill
def parse_args():
parser = argparse.ArgumentParser(
description='fetch McGill transcript',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-a', '--auth', help='''email:password or sid:pin
If the password (or pin) is omitted, will prompt for them.
If the argument is omitted altogether, will look to the values of the
MCGILL_SID and MCGILL_PIN environment variables.
''')
return parser.parse_args()
def get_user_credentials():
args = parse_args()
if args.auth is None:
return None, None
if ':' in args.auth:
return args.auth.split(':', 1)
try:
password = getpass.getpass("Password for user '%s': " % args.auth)
except (EOFError, KeyboardInterrupt):
sys.stderr.write('\n')
sys.exit(0)
return args.auth, password
# TODO(isbadawi): maybe extract some of the logic here to work on
# generic tabular data (as e.g. lists of lists)?
def format_transcript(transcript):
def term_key(term):
semester, year = term.semester.split()
return year, {'Winter': 0, 'Summer': 1, 'Fall': 2}[semester]
buf = StringIO.StringIO()
longest_subject_len = max(len(course.subject)
for course in transcript.get_courses())
longest_title_len = max(len(course.title)
for course in transcript.get_courses())
for term in sorted(transcript.terms.values(), key=term_key):
buf.write(term.semester)
if term.gpa is not None and term.cum_gpa is not None:
buf.write(' (GPA: %s, cumulative: %s)' % (term.gpa, term.cum_gpa))
buf.write('\n')
for course in term.courses:
buf.write('\t')
buf.write('\t'.join([
course.subject.ljust(longest_subject_len),
course.title.ljust(longest_title_len),
str(course.credits),
(course.grade or '').ljust(2),
(course.average or '').ljust(2)]))
buf.write('\n')
return buf.getvalue()
def main():
username, password = get_user_credentials()
client = mcgill.login(username, password)
transcript = client.transcript()
print format_transcript(transcript)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a quick script to dump transcript to stdout.<commit_after>#!/usr/bin/env python
import argparse
import getpass
import sys
import StringIO
import mcgill
def parse_args():
parser = argparse.ArgumentParser(
description='fetch McGill transcript',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-a', '--auth', help='''email:password or sid:pin
If the password (or pin) is omitted, will prompt for them.
If the argument is omitted altogether, will look to the values of the
MCGILL_SID and MCGILL_PIN environment variables.
''')
return parser.parse_args()
def get_user_credentials():
args = parse_args()
if args.auth is None:
return None, None
if ':' in args.auth:
return args.auth.split(':', 1)
try:
password = getpass.getpass("Password for user '%s': " % args.auth)
except (EOFError, KeyboardInterrupt):
sys.stderr.write('\n')
sys.exit(0)
return args.auth, password
# TODO(isbadawi): maybe extract some of the logic here to work on
# generic tabular data (as e.g. lists of lists)?
def format_transcript(transcript):
def term_key(term):
semester, year = term.semester.split()
return year, {'Winter': 0, 'Summer': 1, 'Fall': 2}[semester]
buf = StringIO.StringIO()
longest_subject_len = max(len(course.subject)
for course in transcript.get_courses())
longest_title_len = max(len(course.title)
for course in transcript.get_courses())
for term in sorted(transcript.terms.values(), key=term_key):
buf.write(term.semester)
if term.gpa is not None and term.cum_gpa is not None:
buf.write(' (GPA: %s, cumulative: %s)' % (term.gpa, term.cum_gpa))
buf.write('\n')
for course in term.courses:
buf.write('\t')
buf.write('\t'.join([
course.subject.ljust(longest_subject_len),
course.title.ljust(longest_title_len),
str(course.credits),
(course.grade or '').ljust(2),
(course.average or '').ljust(2)]))
buf.write('\n')
return buf.getvalue()
def main():
username, password = get_user_credentials()
client = mcgill.login(username, password)
transcript = client.transcript()
print format_transcript(transcript)
if __name__ == '__main__':
main()
|
|
d470e524e534a7a0d0b217a12777d6e18c45eb78
|
nurseconnect/formfields.py
|
nurseconnect/formfields.py
|
from django.core.exceptions import ValidationError
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from phonenumber_field.formfields import PhoneNumberField as ImportedPhoneNumberField
from phonenumber_field.phonenumber import to_python
def validate_international_phonenumber(value):
phone_number = to_python(value)
if phone_number and not phone_number.is_valid():
raise ValidationError(_("Please enter a valid South African cellphone number."))
class PhoneNumberField(ImportedPhoneNumberField):
default_error_messages = {
"invalid": _("Please enter a valid South African cellphone number."),
}
default_validators = [validate_international_phonenumber]
def to_python(self, value):
phone_number = to_python(value)
if phone_number and not phone_number.is_valid():
raise ValidationError(self.error_messages['invalid'])
return str(phone_number)
|
Add custom PhoneNumberField for use in NurseConnect
|
Add custom PhoneNumberField for use in NurseConnect
|
Python
|
bsd-2-clause
|
praekelt/nurseconnect,praekelt/nurseconnect,praekelt/nurseconnect
|
Add custom PhoneNumberField for use in NurseConnect
|
from django.core.exceptions import ValidationError
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from phonenumber_field.formfields import PhoneNumberField as ImportedPhoneNumberField
from phonenumber_field.phonenumber import to_python
def validate_international_phonenumber(value):
phone_number = to_python(value)
if phone_number and not phone_number.is_valid():
raise ValidationError(_("Please enter a valid South African cellphone number."))
class PhoneNumberField(ImportedPhoneNumberField):
default_error_messages = {
"invalid": _("Please enter a valid South African cellphone number."),
}
default_validators = [validate_international_phonenumber]
def to_python(self, value):
phone_number = to_python(value)
if phone_number and not phone_number.is_valid():
raise ValidationError(self.error_messages['invalid'])
return str(phone_number)
|
<commit_before><commit_msg>Add custom PhoneNumberField for use in NurseConnect<commit_after>
|
from django.core.exceptions import ValidationError
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from phonenumber_field.formfields import PhoneNumberField as ImportedPhoneNumberField
from phonenumber_field.phonenumber import to_python
def validate_international_phonenumber(value):
phone_number = to_python(value)
if phone_number and not phone_number.is_valid():
raise ValidationError(_("Please enter a valid South African cellphone number."))
class PhoneNumberField(ImportedPhoneNumberField):
default_error_messages = {
"invalid": _("Please enter a valid South African cellphone number."),
}
default_validators = [validate_international_phonenumber]
def to_python(self, value):
phone_number = to_python(value)
if phone_number and not phone_number.is_valid():
raise ValidationError(self.error_messages['invalid'])
return str(phone_number)
|
Add custom PhoneNumberField for use in NurseConnectfrom django.core.exceptions import ValidationError
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from phonenumber_field.formfields import PhoneNumberField as ImportedPhoneNumberField
from phonenumber_field.phonenumber import to_python
def validate_international_phonenumber(value):
phone_number = to_python(value)
if phone_number and not phone_number.is_valid():
raise ValidationError(_("Please enter a valid South African cellphone number."))
class PhoneNumberField(ImportedPhoneNumberField):
default_error_messages = {
"invalid": _("Please enter a valid South African cellphone number."),
}
default_validators = [validate_international_phonenumber]
def to_python(self, value):
phone_number = to_python(value)
if phone_number and not phone_number.is_valid():
raise ValidationError(self.error_messages['invalid'])
return str(phone_number)
|
<commit_before><commit_msg>Add custom PhoneNumberField for use in NurseConnect<commit_after>from django.core.exceptions import ValidationError
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from phonenumber_field.formfields import PhoneNumberField as ImportedPhoneNumberField
from phonenumber_field.phonenumber import to_python
def validate_international_phonenumber(value):
phone_number = to_python(value)
if phone_number and not phone_number.is_valid():
raise ValidationError(_("Please enter a valid South African cellphone number."))
class PhoneNumberField(ImportedPhoneNumberField):
default_error_messages = {
"invalid": _("Please enter a valid South African cellphone number."),
}
default_validators = [validate_international_phonenumber]
def to_python(self, value):
phone_number = to_python(value)
if phone_number and not phone_number.is_valid():
raise ValidationError(self.error_messages['invalid'])
return str(phone_number)
|
|
c9f397162c131455ca2f8b63ab7ede126529c053
|
005.py
|
005.py
|
"""
Project Euler Problem 5
=======================
2520 is the smallest number that can be divided by each of the numbers
from 1 to 10 without any remainder.
What is the smallest number that is evenly divisible by all of the numbers
from 1 to 20?
"""
from functools import reduce
from math import gcd
def divisible_by(factors):
"""
Takes a list of integers and returns the smallest number that
can be divided by each without any remainder.
This works because:
lcm(x, y) == (x * y) / gcd(x, y)
And with the commutative property of multiplication:
lcm(x, y, z) == lcm(x, lcm(y, z))
"""
return reduce(lambda x, y: x * y // gcd(x, y), factors)
def test_divible_by():
assert divisible_by(range(1, 10 + 1)) == 2520
print(divisible_by(range(1, 20 + 1)))
|
Add solution and unit tests for problem 5
|
Add solution and unit tests for problem 5
|
Python
|
mit
|
BeataBak/project-euler-problems
|
Add solution and unit tests for problem 5
|
"""
Project Euler Problem 5
=======================
2520 is the smallest number that can be divided by each of the numbers
from 1 to 10 without any remainder.
What is the smallest number that is evenly divisible by all of the numbers
from 1 to 20?
"""
from functools import reduce
from math import gcd
def divisible_by(factors):
"""
Takes a list of integers and returns the smallest number that
can be divided by each without any remainder.
This works because:
lcm(x, y) == (x * y) / gcd(x, y)
And with the commutative property of multiplication:
lcm(x, y, z) == lcm(x, lcm(y, z))
"""
return reduce(lambda x, y: x * y // gcd(x, y), factors)
def test_divible_by():
assert divisible_by(range(1, 10 + 1)) == 2520
print(divisible_by(range(1, 20 + 1)))
|
<commit_before><commit_msg>Add solution and unit tests for problem 5<commit_after>
|
"""
Project Euler Problem 5
=======================
2520 is the smallest number that can be divided by each of the numbers
from 1 to 10 without any remainder.
What is the smallest number that is evenly divisible by all of the numbers
from 1 to 20?
"""
from functools import reduce
from math import gcd
def divisible_by(factors):
"""
Takes a list of integers and returns the smallest number that
can be divided by each without any remainder.
This works because:
lcm(x, y) == (x * y) / gcd(x, y)
And with the commutative property of multiplication:
lcm(x, y, z) == lcm(x, lcm(y, z))
"""
return reduce(lambda x, y: x * y // gcd(x, y), factors)
def test_divible_by():
assert divisible_by(range(1, 10 + 1)) == 2520
print(divisible_by(range(1, 20 + 1)))
|
Add solution and unit tests for problem 5"""
Project Euler Problem 5
=======================
2520 is the smallest number that can be divided by each of the numbers
from 1 to 10 without any remainder.
What is the smallest number that is evenly divisible by all of the numbers
from 1 to 20?
"""
from functools import reduce
from math import gcd
def divisible_by(factors):
"""
Takes a list of integers and returns the smallest number that
can be divided by each without any remainder.
This works because:
lcm(x, y) == (x * y) / gcd(x, y)
And with the commutative property of multiplication:
lcm(x, y, z) == lcm(x, lcm(y, z))
"""
return reduce(lambda x, y: x * y // gcd(x, y), factors)
def test_divible_by():
assert divisible_by(range(1, 10 + 1)) == 2520
print(divisible_by(range(1, 20 + 1)))
|
<commit_before><commit_msg>Add solution and unit tests for problem 5<commit_after>"""
Project Euler Problem 5
=======================
2520 is the smallest number that can be divided by each of the numbers
from 1 to 10 without any remainder.
What is the smallest number that is evenly divisible by all of the numbers
from 1 to 20?
"""
from functools import reduce
from math import gcd
def divisible_by(factors):
"""
Takes a list of integers and returns the smallest number that
can be divided by each without any remainder.
This works because:
lcm(x, y) == (x * y) / gcd(x, y)
And with the commutative property of multiplication:
lcm(x, y, z) == lcm(x, lcm(y, z))
"""
return reduce(lambda x, y: x * y // gcd(x, y), factors)
def test_divible_by():
assert divisible_by(range(1, 10 + 1)) == 2520
print(divisible_by(range(1, 20 + 1)))
|
|
e2d7b446cc290cda01709636e964f850cbef0532
|
17B-162/HI/analysis/convolve_and_match_iram30m.py
|
17B-162/HI/analysis/convolve_and_match_iram30m.py
|
'''
Create a cube that is spatially-matched and convolved to the IRAM 30-m
CO(2-1) cube.
'''
import os
from os.path import join as osjoin
from cube_analysis.reprojection import reproject_cube
from cube_analysis.run_pipe import run_pipeline
from paths import (seventeenB_HI_data_1kms_wGBT_path,
seventeenB_1kms_wGBT_HI_file_dict, iram_co21_data_path)
out_folder = seventeenB_HI_data_1kms_wGBT_path("iram_co21_match",
no_check=True)
if not os.path.exists(out_folder):
os.mkdir(out_folder)
out_name = seventeenB_1kms_wGBT_HI_file_dict['Cube'].split("/")[-1].rstrip(".fits") + \
".iram30m_spatialmatch.fits"
reproject_cube(seventeenB_1kms_wGBT_HI_file_dict['Cube'],
iram_co21_data_path("m33.co21_iram.fits"),
out_name,
output_folder=out_folder,
save_spectral=False,
is_huge=True,
reproject_type='spatial',
common_beam=True,
verbose=True,
chunk=40)
run_pipeline(osjoin(out_folder, out_name),
out_folder,
masking_kwargs={"method": "ppv_connectivity",
"save_cube": True,
"is_huge": True,
"smooth_chans": 6,
"min_chan": 4,
"peak_snr": 4.,
"min_snr": 2,
"edge_thresh": 1,
"verbose": True,
},
moment_kwargs={"num_cores": 1,
"verbose": False,
"chunk_size": 1e5,
"make_peakvels": False},)
|
Make a 17B HI cube matched to the 30-m IRAM cube
|
Make a 17B HI cube matched to the 30-m IRAM cube
|
Python
|
mit
|
e-koch/VLA_Lband,e-koch/VLA_Lband
|
Make a 17B HI cube matched to the 30-m IRAM cube
|
'''
Create a cube that is spatially-matched and convolved to the IRAM 30-m
CO(2-1) cube.
'''
import os
from os.path import join as osjoin
from cube_analysis.reprojection import reproject_cube
from cube_analysis.run_pipe import run_pipeline
from paths import (seventeenB_HI_data_1kms_wGBT_path,
seventeenB_1kms_wGBT_HI_file_dict, iram_co21_data_path)
out_folder = seventeenB_HI_data_1kms_wGBT_path("iram_co21_match",
no_check=True)
if not os.path.exists(out_folder):
os.mkdir(out_folder)
out_name = seventeenB_1kms_wGBT_HI_file_dict['Cube'].split("/")[-1].rstrip(".fits") + \
".iram30m_spatialmatch.fits"
reproject_cube(seventeenB_1kms_wGBT_HI_file_dict['Cube'],
iram_co21_data_path("m33.co21_iram.fits"),
out_name,
output_folder=out_folder,
save_spectral=False,
is_huge=True,
reproject_type='spatial',
common_beam=True,
verbose=True,
chunk=40)
run_pipeline(osjoin(out_folder, out_name),
out_folder,
masking_kwargs={"method": "ppv_connectivity",
"save_cube": True,
"is_huge": True,
"smooth_chans": 6,
"min_chan": 4,
"peak_snr": 4.,
"min_snr": 2,
"edge_thresh": 1,
"verbose": True,
},
moment_kwargs={"num_cores": 1,
"verbose": False,
"chunk_size": 1e5,
"make_peakvels": False},)
|
<commit_before><commit_msg>Make a 17B HI cube matched to the 30-m IRAM cube<commit_after>
|
'''
Create a cube that is spatially-matched and convolved to the IRAM 30-m
CO(2-1) cube.
'''
import os
from os.path import join as osjoin
from cube_analysis.reprojection import reproject_cube
from cube_analysis.run_pipe import run_pipeline
from paths import (seventeenB_HI_data_1kms_wGBT_path,
seventeenB_1kms_wGBT_HI_file_dict, iram_co21_data_path)
out_folder = seventeenB_HI_data_1kms_wGBT_path("iram_co21_match",
no_check=True)
if not os.path.exists(out_folder):
os.mkdir(out_folder)
out_name = seventeenB_1kms_wGBT_HI_file_dict['Cube'].split("/")[-1].rstrip(".fits") + \
".iram30m_spatialmatch.fits"
reproject_cube(seventeenB_1kms_wGBT_HI_file_dict['Cube'],
iram_co21_data_path("m33.co21_iram.fits"),
out_name,
output_folder=out_folder,
save_spectral=False,
is_huge=True,
reproject_type='spatial',
common_beam=True,
verbose=True,
chunk=40)
run_pipeline(osjoin(out_folder, out_name),
out_folder,
masking_kwargs={"method": "ppv_connectivity",
"save_cube": True,
"is_huge": True,
"smooth_chans": 6,
"min_chan": 4,
"peak_snr": 4.,
"min_snr": 2,
"edge_thresh": 1,
"verbose": True,
},
moment_kwargs={"num_cores": 1,
"verbose": False,
"chunk_size": 1e5,
"make_peakvels": False},)
|
Make a 17B HI cube matched to the 30-m IRAM cube
'''
Create a cube that is spatially-matched and convolved to the IRAM 30-m
CO(2-1) cube.
'''
import os
from os.path import join as osjoin
from cube_analysis.reprojection import reproject_cube
from cube_analysis.run_pipe import run_pipeline
from paths import (seventeenB_HI_data_1kms_wGBT_path,
seventeenB_1kms_wGBT_HI_file_dict, iram_co21_data_path)
out_folder = seventeenB_HI_data_1kms_wGBT_path("iram_co21_match",
no_check=True)
if not os.path.exists(out_folder):
os.mkdir(out_folder)
out_name = seventeenB_1kms_wGBT_HI_file_dict['Cube'].split("/")[-1].rstrip(".fits") + \
".iram30m_spatialmatch.fits"
reproject_cube(seventeenB_1kms_wGBT_HI_file_dict['Cube'],
iram_co21_data_path("m33.co21_iram.fits"),
out_name,
output_folder=out_folder,
save_spectral=False,
is_huge=True,
reproject_type='spatial',
common_beam=True,
verbose=True,
chunk=40)
run_pipeline(osjoin(out_folder, out_name),
out_folder,
masking_kwargs={"method": "ppv_connectivity",
"save_cube": True,
"is_huge": True,
"smooth_chans": 6,
"min_chan": 4,
"peak_snr": 4.,
"min_snr": 2,
"edge_thresh": 1,
"verbose": True,
},
moment_kwargs={"num_cores": 1,
"verbose": False,
"chunk_size": 1e5,
"make_peakvels": False},)
|
<commit_before><commit_msg>Make a 17B HI cube matched to the 30-m IRAM cube<commit_after>
'''
Create a cube that is spatially-matched and convolved to the IRAM 30-m
CO(2-1) cube.
'''
import os
from os.path import join as osjoin
from cube_analysis.reprojection import reproject_cube
from cube_analysis.run_pipe import run_pipeline
from paths import (seventeenB_HI_data_1kms_wGBT_path,
seventeenB_1kms_wGBT_HI_file_dict, iram_co21_data_path)
out_folder = seventeenB_HI_data_1kms_wGBT_path("iram_co21_match",
no_check=True)
if not os.path.exists(out_folder):
os.mkdir(out_folder)
out_name = seventeenB_1kms_wGBT_HI_file_dict['Cube'].split("/")[-1].rstrip(".fits") + \
".iram30m_spatialmatch.fits"
reproject_cube(seventeenB_1kms_wGBT_HI_file_dict['Cube'],
iram_co21_data_path("m33.co21_iram.fits"),
out_name,
output_folder=out_folder,
save_spectral=False,
is_huge=True,
reproject_type='spatial',
common_beam=True,
verbose=True,
chunk=40)
run_pipeline(osjoin(out_folder, out_name),
out_folder,
masking_kwargs={"method": "ppv_connectivity",
"save_cube": True,
"is_huge": True,
"smooth_chans": 6,
"min_chan": 4,
"peak_snr": 4.,
"min_snr": 2,
"edge_thresh": 1,
"verbose": True,
},
moment_kwargs={"num_cores": 1,
"verbose": False,
"chunk_size": 1e5,
"make_peakvels": False},)
|
|
126893b50e5d72998398dd1c3687fbf102e6b441
|
app/feed.py
|
app/feed.py
|
#!/usr/bin/python
# Documentation: http://api.mongodb.org/python/
# A python script connecting to a MongoDB given a MongoDB Connection URI.
import sys
import pymongo
import os
### Create seed data
SEED_DATA = [
{
'decade': '1970s',
'artist': 'Debby Boone',
'song': 'You Light Up My Life',
'weeksAtOne': 10
},
{
'decade': '1980s',
'artist': 'Olivia Newton-John',
'song': 'Physical',
'weeksAtOne': 10
},
{
'decade': '1990s',
'artist': 'Mariah Carey',
'song': 'One Sweet Day',
'weeksAtOne': 16
}
]
### Standard URI format: mongodb://[dbuser:dbpassword@]host:port/dbname
MONGODB_URI = 'mongodb://heroku:f6a7beb1d678f34e3bbef2d5a6e62cbd@paulo.mongohq.com:10025/app18218091'
###############################################################################
# main
###############################################################################
def main(args):
client = pymongo.MongoClient(MONGODB_URI)
db = client.get_default_database()
# First we'll add a few songs. Nothing is required to create the songs
# collection; it is created automatically when we insert.
songs = db['songs']
# Note that the insert method can take either an array or a single dict.
songs.insert(SEED_DATA)
# Then we need to give Boyz II Men credit for their contribution to
# the hit "One Sweet Day".
query = {'song': 'One Sweet Day'}
songs.update(query, {'$set': {'artist': 'Mariah Carey ft. Boyz II Men'}})
# Finally we run a query which returns all the hits that spent 10 or
# more weeks at number 1.
cursor = songs.find({'weeksAtOne': {'$gte': 10}}).sort('decade', 1)
for doc in cursor:
print ('In the %s, %s by %s topped the charts for %d straight weeks.' %
(doc['decade'], doc['song'], doc['artist'], doc['weeksAtOne']))
### Since this is an example, we'll clean up after ourselves.
db.drop_collection('songs')
### Only close the connection when your app is terminating
client.close()
if __name__ == '__main__':
main(sys.argv[1:])
|
Set up mongodb addon to heroku app; created a test python script to connect to db
|
Set up mongodb addon to heroku app; created a test python script to connect to db
|
Python
|
mit
|
hw3jung/Gucci,hw3jung/Gucci
|
Set up mongodb addon to heroku app; created a test python script to connect to db
|
#!/usr/bin/python
# Documentation: http://api.mongodb.org/python/
# A python script connecting to a MongoDB given a MongoDB Connection URI.
import sys
import pymongo
import os
### Create seed data
SEED_DATA = [
{
'decade': '1970s',
'artist': 'Debby Boone',
'song': 'You Light Up My Life',
'weeksAtOne': 10
},
{
'decade': '1980s',
'artist': 'Olivia Newton-John',
'song': 'Physical',
'weeksAtOne': 10
},
{
'decade': '1990s',
'artist': 'Mariah Carey',
'song': 'One Sweet Day',
'weeksAtOne': 16
}
]
### Standard URI format: mongodb://[dbuser:dbpassword@]host:port/dbname
MONGODB_URI = 'mongodb://heroku:f6a7beb1d678f34e3bbef2d5a6e62cbd@paulo.mongohq.com:10025/app18218091'
###############################################################################
# main
###############################################################################
def main(args):
client = pymongo.MongoClient(MONGODB_URI)
db = client.get_default_database()
# First we'll add a few songs. Nothing is required to create the songs
# collection; it is created automatically when we insert.
songs = db['songs']
# Note that the insert method can take either an array or a single dict.
songs.insert(SEED_DATA)
# Then we need to give Boyz II Men credit for their contribution to
# the hit "One Sweet Day".
query = {'song': 'One Sweet Day'}
songs.update(query, {'$set': {'artist': 'Mariah Carey ft. Boyz II Men'}})
# Finally we run a query which returns all the hits that spent 10 or
# more weeks at number 1.
cursor = songs.find({'weeksAtOne': {'$gte': 10}}).sort('decade', 1)
for doc in cursor:
print ('In the %s, %s by %s topped the charts for %d straight weeks.' %
(doc['decade'], doc['song'], doc['artist'], doc['weeksAtOne']))
### Since this is an example, we'll clean up after ourselves.
db.drop_collection('songs')
### Only close the connection when your app is terminating
client.close()
if __name__ == '__main__':
main(sys.argv[1:])
|
<commit_before><commit_msg>Set up mongodb addon to heroku app; created a test python script to connect to db<commit_after>
|
#!/usr/bin/python
# Documentation: http://api.mongodb.org/python/
# A python script connecting to a MongoDB given a MongoDB Connection URI.
import sys
import pymongo
import os
### Create seed data
SEED_DATA = [
{
'decade': '1970s',
'artist': 'Debby Boone',
'song': 'You Light Up My Life',
'weeksAtOne': 10
},
{
'decade': '1980s',
'artist': 'Olivia Newton-John',
'song': 'Physical',
'weeksAtOne': 10
},
{
'decade': '1990s',
'artist': 'Mariah Carey',
'song': 'One Sweet Day',
'weeksAtOne': 16
}
]
### Standard URI format: mongodb://[dbuser:dbpassword@]host:port/dbname
MONGODB_URI = 'mongodb://heroku:f6a7beb1d678f34e3bbef2d5a6e62cbd@paulo.mongohq.com:10025/app18218091'
###############################################################################
# main
###############################################################################
def main(args):
client = pymongo.MongoClient(MONGODB_URI)
db = client.get_default_database()
# First we'll add a few songs. Nothing is required to create the songs
# collection; it is created automatically when we insert.
songs = db['songs']
# Note that the insert method can take either an array or a single dict.
songs.insert(SEED_DATA)
# Then we need to give Boyz II Men credit for their contribution to
# the hit "One Sweet Day".
query = {'song': 'One Sweet Day'}
songs.update(query, {'$set': {'artist': 'Mariah Carey ft. Boyz II Men'}})
# Finally we run a query which returns all the hits that spent 10 or
# more weeks at number 1.
cursor = songs.find({'weeksAtOne': {'$gte': 10}}).sort('decade', 1)
for doc in cursor:
print ('In the %s, %s by %s topped the charts for %d straight weeks.' %
(doc['decade'], doc['song'], doc['artist'], doc['weeksAtOne']))
### Since this is an example, we'll clean up after ourselves.
db.drop_collection('songs')
### Only close the connection when your app is terminating
client.close()
if __name__ == '__main__':
main(sys.argv[1:])
|
Set up mongodb addon to heroku app; created a test python script to connect to db#!/usr/bin/python
# Documentation: http://api.mongodb.org/python/
# A python script connecting to a MongoDB given a MongoDB Connection URI.
import sys
import pymongo
import os
### Create seed data
SEED_DATA = [
{
'decade': '1970s',
'artist': 'Debby Boone',
'song': 'You Light Up My Life',
'weeksAtOne': 10
},
{
'decade': '1980s',
'artist': 'Olivia Newton-John',
'song': 'Physical',
'weeksAtOne': 10
},
{
'decade': '1990s',
'artist': 'Mariah Carey',
'song': 'One Sweet Day',
'weeksAtOne': 16
}
]
### Standard URI format: mongodb://[dbuser:dbpassword@]host:port/dbname
MONGODB_URI = 'mongodb://heroku:f6a7beb1d678f34e3bbef2d5a6e62cbd@paulo.mongohq.com:10025/app18218091'
###############################################################################
# main
###############################################################################
def main(args):
client = pymongo.MongoClient(MONGODB_URI)
db = client.get_default_database()
# First we'll add a few songs. Nothing is required to create the songs
# collection; it is created automatically when we insert.
songs = db['songs']
# Note that the insert method can take either an array or a single dict.
songs.insert(SEED_DATA)
# Then we need to give Boyz II Men credit for their contribution to
# the hit "One Sweet Day".
query = {'song': 'One Sweet Day'}
songs.update(query, {'$set': {'artist': 'Mariah Carey ft. Boyz II Men'}})
# Finally we run a query which returns all the hits that spent 10 or
# more weeks at number 1.
cursor = songs.find({'weeksAtOne': {'$gte': 10}}).sort('decade', 1)
for doc in cursor:
print ('In the %s, %s by %s topped the charts for %d straight weeks.' %
(doc['decade'], doc['song'], doc['artist'], doc['weeksAtOne']))
### Since this is an example, we'll clean up after ourselves.
db.drop_collection('songs')
### Only close the connection when your app is terminating
client.close()
if __name__ == '__main__':
main(sys.argv[1:])
|
<commit_before><commit_msg>Set up mongodb addon to heroku app; created a test python script to connect to db<commit_after>#!/usr/bin/python
# Documentation: http://api.mongodb.org/python/
# A python script connecting to a MongoDB given a MongoDB Connection URI.
import sys
import pymongo
import os
### Create seed data
SEED_DATA = [
{
'decade': '1970s',
'artist': 'Debby Boone',
'song': 'You Light Up My Life',
'weeksAtOne': 10
},
{
'decade': '1980s',
'artist': 'Olivia Newton-John',
'song': 'Physical',
'weeksAtOne': 10
},
{
'decade': '1990s',
'artist': 'Mariah Carey',
'song': 'One Sweet Day',
'weeksAtOne': 16
}
]
### Standard URI format: mongodb://[dbuser:dbpassword@]host:port/dbname
MONGODB_URI = 'mongodb://heroku:f6a7beb1d678f34e3bbef2d5a6e62cbd@paulo.mongohq.com:10025/app18218091'
###############################################################################
# main
###############################################################################
def main(args):
client = pymongo.MongoClient(MONGODB_URI)
db = client.get_default_database()
# First we'll add a few songs. Nothing is required to create the songs
# collection; it is created automatically when we insert.
songs = db['songs']
# Note that the insert method can take either an array or a single dict.
songs.insert(SEED_DATA)
# Then we need to give Boyz II Men credit for their contribution to
# the hit "One Sweet Day".
query = {'song': 'One Sweet Day'}
songs.update(query, {'$set': {'artist': 'Mariah Carey ft. Boyz II Men'}})
# Finally we run a query which returns all the hits that spent 10 or
# more weeks at number 1.
cursor = songs.find({'weeksAtOne': {'$gte': 10}}).sort('decade', 1)
for doc in cursor:
print ('In the %s, %s by %s topped the charts for %d straight weeks.' %
(doc['decade'], doc['song'], doc['artist'], doc['weeksAtOne']))
### Since this is an example, we'll clean up after ourselves.
db.drop_collection('songs')
### Only close the connection when your app is terminating
client.close()
if __name__ == '__main__':
main(sys.argv[1:])
|
|
905f46ffba7f008a6f290cd5db9060ba62e6d576
|
test/test_bezier_direct.py
|
test/test_bezier_direct.py
|
from __future__ import division
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import Bezier
from cocos.sprite import Sprite
import pyglet
from cocos import path
def direct_bezier(p0, p1, p2, p3):
'''Given four points, returns a bezier path that goes through them.
It starts at p0, finishes at p3, and passes through p1 and p2 at t=0.4 and
t=0.6 respectively.
'''
def _one_dim(p0xy, B1xy, B2xy, p3xy):
'''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3.
p0: P sub 0 of bezier, it's also B(0)
B1: B(0.4)
B2: B(0.6)
p3: P sub 3 of bezier, it's also B(1)
'''
p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36
p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288
return p1xy, p2xy
bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0])
bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1])
bp1 = bp1x, bp1y
bp2 = bp2x, bp2y
bezier_path = path.Bezier(p0, p3, bp1, bp2)
return bezier_path
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
go_through = [(100,300), (370,330), (430,270), (750,550)]
# visually spot through where it should go
for pos in go_through:
sprite = Sprite('fire.png')
sprite.position = pos
sprite.scale = .3
self.add(sprite)
# calculate the points
bezier_path = direct_bezier(*go_through)
sprite = Sprite('fire.png')
sprite.scale = .3
sprite.color = (0, 0, 255)
self.add(sprite)
sprite.do(Bezier(bezier_path, 5))
if __name__ == "__main__":
director.init(width=800, height=600)
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
|
Test using bezier going through 4 specific points
|
Test using bezier going through 4 specific points
|
Python
|
bsd-3-clause
|
google-code-export/los-cocos,google-code-export/los-cocos
|
Test using bezier going through 4 specific points
|
from __future__ import division
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import Bezier
from cocos.sprite import Sprite
import pyglet
from cocos import path
def direct_bezier(p0, p1, p2, p3):
'''Given four points, returns a bezier path that goes through them.
It starts at p0, finishes at p3, and passes through p1 and p2 at t=0.4 and
t=0.6 respectively.
'''
def _one_dim(p0xy, B1xy, B2xy, p3xy):
'''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3.
p0: P sub 0 of bezier, it's also B(0)
B1: B(0.4)
B2: B(0.6)
p3: P sub 3 of bezier, it's also B(1)
'''
p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36
p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288
return p1xy, p2xy
bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0])
bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1])
bp1 = bp1x, bp1y
bp2 = bp2x, bp2y
bezier_path = path.Bezier(p0, p3, bp1, bp2)
return bezier_path
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
go_through = [(100,300), (370,330), (430,270), (750,550)]
# visually spot through where it should go
for pos in go_through:
sprite = Sprite('fire.png')
sprite.position = pos
sprite.scale = .3
self.add(sprite)
# calculate the points
bezier_path = direct_bezier(*go_through)
sprite = Sprite('fire.png')
sprite.scale = .3
sprite.color = (0, 0, 255)
self.add(sprite)
sprite.do(Bezier(bezier_path, 5))
if __name__ == "__main__":
director.init(width=800, height=600)
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
|
<commit_before><commit_msg>Test using bezier going through 4 specific points<commit_after>
|
from __future__ import division
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import Bezier
from cocos.sprite import Sprite
import pyglet
from cocos import path
def direct_bezier(p0, p1, p2, p3):
'''Given four points, returns a bezier path that goes through them.
It starts at p0, finishes at p3, and passes through p1 and p2 at t=0.4 and
t=0.6 respectively.
'''
def _one_dim(p0xy, B1xy, B2xy, p3xy):
'''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3.
p0: P sub 0 of bezier, it's also B(0)
B1: B(0.4)
B2: B(0.6)
p3: P sub 3 of bezier, it's also B(1)
'''
p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36
p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288
return p1xy, p2xy
bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0])
bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1])
bp1 = bp1x, bp1y
bp2 = bp2x, bp2y
bezier_path = path.Bezier(p0, p3, bp1, bp2)
return bezier_path
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
go_through = [(100,300), (370,330), (430,270), (750,550)]
# visually spot through where it should go
for pos in go_through:
sprite = Sprite('fire.png')
sprite.position = pos
sprite.scale = .3
self.add(sprite)
# calculate the points
bezier_path = direct_bezier(*go_through)
sprite = Sprite('fire.png')
sprite.scale = .3
sprite.color = (0, 0, 255)
self.add(sprite)
sprite.do(Bezier(bezier_path, 5))
if __name__ == "__main__":
director.init(width=800, height=600)
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
|
Test using bezier going through 4 specific pointsfrom __future__ import division
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import Bezier
from cocos.sprite import Sprite
import pyglet
from cocos import path
def direct_bezier(p0, p1, p2, p3):
'''Given four points, returns a bezier path that goes through them.
It starts at p0, finishes at p3, and passes through p1 and p2 at t=0.4 and
t=0.6 respectively.
'''
def _one_dim(p0xy, B1xy, B2xy, p3xy):
'''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3.
p0: P sub 0 of bezier, it's also B(0)
B1: B(0.4)
B2: B(0.6)
p3: P sub 3 of bezier, it's also B(1)
'''
p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36
p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288
return p1xy, p2xy
bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0])
bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1])
bp1 = bp1x, bp1y
bp2 = bp2x, bp2y
bezier_path = path.Bezier(p0, p3, bp1, bp2)
return bezier_path
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
go_through = [(100,300), (370,330), (430,270), (750,550)]
# visually spot through where it should go
for pos in go_through:
sprite = Sprite('fire.png')
sprite.position = pos
sprite.scale = .3
self.add(sprite)
# calculate the points
bezier_path = direct_bezier(*go_through)
sprite = Sprite('fire.png')
sprite.scale = .3
sprite.color = (0, 0, 255)
self.add(sprite)
sprite.do(Bezier(bezier_path, 5))
if __name__ == "__main__":
director.init(width=800, height=600)
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
|
<commit_before><commit_msg>Test using bezier going through 4 specific points<commit_after>from __future__ import division
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
from cocos.actions import Bezier
from cocos.sprite import Sprite
import pyglet
from cocos import path
def direct_bezier(p0, p1, p2, p3):
'''Given four points, returns a bezier path that goes through them.
It starts at p0, finishes at p3, and passes through p1 and p2 at t=0.4 and
t=0.6 respectively.
'''
def _one_dim(p0xy, B1xy, B2xy, p3xy):
'''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3.
p0: P sub 0 of bezier, it's also B(0)
B1: B(0.4)
B2: B(0.6)
p3: P sub 3 of bezier, it's also B(1)
'''
p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36
p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288
return p1xy, p2xy
bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0])
bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1])
bp1 = bp1x, bp1y
bp2 = bp2x, bp2y
bezier_path = path.Bezier(p0, p3, bp1, bp2)
return bezier_path
class TestLayer(cocos.layer.Layer):
def __init__(self):
super( TestLayer, self ).__init__()
go_through = [(100,300), (370,330), (430,270), (750,550)]
# visually spot through where it should go
for pos in go_through:
sprite = Sprite('fire.png')
sprite.position = pos
sprite.scale = .3
self.add(sprite)
# calculate the points
bezier_path = direct_bezier(*go_through)
sprite = Sprite('fire.png')
sprite.scale = .3
sprite.color = (0, 0, 255)
self.add(sprite)
sprite.do(Bezier(bezier_path, 5))
if __name__ == "__main__":
director.init(width=800, height=600)
test_layer = TestLayer ()
main_scene = cocos.scene.Scene (test_layer)
director.run (main_scene)
|
|
470b7313fa9b176fa4492ac9f355acd21542265d
|
tests/test_lib_fallback.py
|
tests/test_lib_fallback.py
|
from mock import patch
from pytest import raises
from tvrenamr.errors import NoMoreLibrariesException
from tvrenamr.main import Episode
from .base import BaseTest
from .mock_requests import initially_bad_xml, invalid_xml
class TestLibrariesFallback(BaseTest):
@patch('tvrenamr.libraries.requests.get', new=invalid_xml)
def test_rename_with_all_libraries_returning_invalid_xml(self):
with raises(NoMoreLibrariesException):
self.tv.retrieve_episode_title(self._file.episodes[0])
@patch('tvrenamr.libraries.requests.get', new=initially_bad_xml)
def test_rename_with_tvdb_falling_over(self):
episode = Episode(self._file, '8')
title = self.tv.retrieve_episode_title(episode)
assert title == 'The Adhesive Duck Deficiency'
|
from mock import patch
from pytest import raises
from tvrenamr.errors import NoMoreLibrariesException
from tvrenamr.libraries import TheTvDb, TvRage
from tvrenamr.main import Episode
from .base import BaseTest
from .mock_requests import initially_bad_xml, invalid_xml
class TestLibrariesFallback(BaseTest):
@patch('tvrenamr.libraries.requests.get', new=invalid_xml)
def test_rename_with_all_libraries_returning_invalid_xml(self):
with raises(NoMoreLibrariesException):
self.tv.retrieve_episode_title(self._file.episodes[0])
@patch('tvrenamr.libraries.requests.get', new=initially_bad_xml)
def test_rename_with_tvdb_falling_over(self):
episode = Episode(self._file, '8')
title = self.tv.retrieve_episode_title(episode)
assert title == 'The Adhesive Duck Deficiency'
def test_setting_library_stops_fallback(self):
libraries = self.tv._get_libraries('thetvdb')
assert type(libraries) == list
assert len(libraries) == 1
assert libraries[0] == TheTvDb
libraries = self.tv._get_libraries('tvrage')
assert type(libraries) == list
assert len(libraries) == 1
assert libraries[0] == TvRage
|
Test library fallback is overridden by setting a library
|
Test library fallback is overridden by setting a library
|
Python
|
mit
|
wintersandroid/tvrenamr,ghickman/tvrenamr
|
from mock import patch
from pytest import raises
from tvrenamr.errors import NoMoreLibrariesException
from tvrenamr.main import Episode
from .base import BaseTest
from .mock_requests import initially_bad_xml, invalid_xml
class TestLibrariesFallback(BaseTest):
@patch('tvrenamr.libraries.requests.get', new=invalid_xml)
def test_rename_with_all_libraries_returning_invalid_xml(self):
with raises(NoMoreLibrariesException):
self.tv.retrieve_episode_title(self._file.episodes[0])
@patch('tvrenamr.libraries.requests.get', new=initially_bad_xml)
def test_rename_with_tvdb_falling_over(self):
episode = Episode(self._file, '8')
title = self.tv.retrieve_episode_title(episode)
assert title == 'The Adhesive Duck Deficiency'
Test library fallback is overridden by setting a library
|
from mock import patch
from pytest import raises
from tvrenamr.errors import NoMoreLibrariesException
from tvrenamr.libraries import TheTvDb, TvRage
from tvrenamr.main import Episode
from .base import BaseTest
from .mock_requests import initially_bad_xml, invalid_xml
class TestLibrariesFallback(BaseTest):
@patch('tvrenamr.libraries.requests.get', new=invalid_xml)
def test_rename_with_all_libraries_returning_invalid_xml(self):
with raises(NoMoreLibrariesException):
self.tv.retrieve_episode_title(self._file.episodes[0])
@patch('tvrenamr.libraries.requests.get', new=initially_bad_xml)
def test_rename_with_tvdb_falling_over(self):
episode = Episode(self._file, '8')
title = self.tv.retrieve_episode_title(episode)
assert title == 'The Adhesive Duck Deficiency'
def test_setting_library_stops_fallback(self):
libraries = self.tv._get_libraries('thetvdb')
assert type(libraries) == list
assert len(libraries) == 1
assert libraries[0] == TheTvDb
libraries = self.tv._get_libraries('tvrage')
assert type(libraries) == list
assert len(libraries) == 1
assert libraries[0] == TvRage
|
<commit_before>from mock import patch
from pytest import raises
from tvrenamr.errors import NoMoreLibrariesException
from tvrenamr.main import Episode
from .base import BaseTest
from .mock_requests import initially_bad_xml, invalid_xml
class TestLibrariesFallback(BaseTest):
@patch('tvrenamr.libraries.requests.get', new=invalid_xml)
def test_rename_with_all_libraries_returning_invalid_xml(self):
with raises(NoMoreLibrariesException):
self.tv.retrieve_episode_title(self._file.episodes[0])
@patch('tvrenamr.libraries.requests.get', new=initially_bad_xml)
def test_rename_with_tvdb_falling_over(self):
episode = Episode(self._file, '8')
title = self.tv.retrieve_episode_title(episode)
assert title == 'The Adhesive Duck Deficiency'
<commit_msg>Test library fallback is overridden by setting a library<commit_after>
|
from mock import patch
from pytest import raises
from tvrenamr.errors import NoMoreLibrariesException
from tvrenamr.libraries import TheTvDb, TvRage
from tvrenamr.main import Episode
from .base import BaseTest
from .mock_requests import initially_bad_xml, invalid_xml
class TestLibrariesFallback(BaseTest):
@patch('tvrenamr.libraries.requests.get', new=invalid_xml)
def test_rename_with_all_libraries_returning_invalid_xml(self):
with raises(NoMoreLibrariesException):
self.tv.retrieve_episode_title(self._file.episodes[0])
@patch('tvrenamr.libraries.requests.get', new=initially_bad_xml)
def test_rename_with_tvdb_falling_over(self):
episode = Episode(self._file, '8')
title = self.tv.retrieve_episode_title(episode)
assert title == 'The Adhesive Duck Deficiency'
def test_setting_library_stops_fallback(self):
libraries = self.tv._get_libraries('thetvdb')
assert type(libraries) == list
assert len(libraries) == 1
assert libraries[0] == TheTvDb
libraries = self.tv._get_libraries('tvrage')
assert type(libraries) == list
assert len(libraries) == 1
assert libraries[0] == TvRage
|
from mock import patch
from pytest import raises
from tvrenamr.errors import NoMoreLibrariesException
from tvrenamr.main import Episode
from .base import BaseTest
from .mock_requests import initially_bad_xml, invalid_xml
class TestLibrariesFallback(BaseTest):
@patch('tvrenamr.libraries.requests.get', new=invalid_xml)
def test_rename_with_all_libraries_returning_invalid_xml(self):
with raises(NoMoreLibrariesException):
self.tv.retrieve_episode_title(self._file.episodes[0])
@patch('tvrenamr.libraries.requests.get', new=initially_bad_xml)
def test_rename_with_tvdb_falling_over(self):
episode = Episode(self._file, '8')
title = self.tv.retrieve_episode_title(episode)
assert title == 'The Adhesive Duck Deficiency'
Test library fallback is overridden by setting a libraryfrom mock import patch
from pytest import raises
from tvrenamr.errors import NoMoreLibrariesException
from tvrenamr.libraries import TheTvDb, TvRage
from tvrenamr.main import Episode
from .base import BaseTest
from .mock_requests import initially_bad_xml, invalid_xml
class TestLibrariesFallback(BaseTest):
@patch('tvrenamr.libraries.requests.get', new=invalid_xml)
def test_rename_with_all_libraries_returning_invalid_xml(self):
with raises(NoMoreLibrariesException):
self.tv.retrieve_episode_title(self._file.episodes[0])
@patch('tvrenamr.libraries.requests.get', new=initially_bad_xml)
def test_rename_with_tvdb_falling_over(self):
episode = Episode(self._file, '8')
title = self.tv.retrieve_episode_title(episode)
assert title == 'The Adhesive Duck Deficiency'
def test_setting_library_stops_fallback(self):
libraries = self.tv._get_libraries('thetvdb')
assert type(libraries) == list
assert len(libraries) == 1
assert libraries[0] == TheTvDb
libraries = self.tv._get_libraries('tvrage')
assert type(libraries) == list
assert len(libraries) == 1
assert libraries[0] == TvRage
|
<commit_before>from mock import patch
from pytest import raises
from tvrenamr.errors import NoMoreLibrariesException
from tvrenamr.main import Episode
from .base import BaseTest
from .mock_requests import initially_bad_xml, invalid_xml
class TestLibrariesFallback(BaseTest):
@patch('tvrenamr.libraries.requests.get', new=invalid_xml)
def test_rename_with_all_libraries_returning_invalid_xml(self):
with raises(NoMoreLibrariesException):
self.tv.retrieve_episode_title(self._file.episodes[0])
@patch('tvrenamr.libraries.requests.get', new=initially_bad_xml)
def test_rename_with_tvdb_falling_over(self):
episode = Episode(self._file, '8')
title = self.tv.retrieve_episode_title(episode)
assert title == 'The Adhesive Duck Deficiency'
<commit_msg>Test library fallback is overridden by setting a library<commit_after>from mock import patch
from pytest import raises
from tvrenamr.errors import NoMoreLibrariesException
from tvrenamr.libraries import TheTvDb, TvRage
from tvrenamr.main import Episode
from .base import BaseTest
from .mock_requests import initially_bad_xml, invalid_xml
class TestLibrariesFallback(BaseTest):
@patch('tvrenamr.libraries.requests.get', new=invalid_xml)
def test_rename_with_all_libraries_returning_invalid_xml(self):
with raises(NoMoreLibrariesException):
self.tv.retrieve_episode_title(self._file.episodes[0])
@patch('tvrenamr.libraries.requests.get', new=initially_bad_xml)
def test_rename_with_tvdb_falling_over(self):
episode = Episode(self._file, '8')
title = self.tv.retrieve_episode_title(episode)
assert title == 'The Adhesive Duck Deficiency'
def test_setting_library_stops_fallback(self):
libraries = self.tv._get_libraries('thetvdb')
assert type(libraries) == list
assert len(libraries) == 1
assert libraries[0] == TheTvDb
libraries = self.tv._get_libraries('tvrage')
assert type(libraries) == list
assert len(libraries) == 1
assert libraries[0] == TvRage
|
c41627f6eef6647a2b80e79b7cee860c4455daeb
|
utils/apply-fixit-edits.py
|
utils/apply-fixit-edits.py
|
#!/usr/bin/env python
#===--- apply-fixit-edits.py - Tool for applying edits from .remap files ---===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
#===------------------------------------------------------------------------===#
import subprocess
import json
import argparse
import sys
def find_remap_files(path):
out = None
try:
out = subprocess.check_output(["find", path, "-name", "*.remap"])
except subprocess.CalledProcessError:
return None
lines = out.split('\n')
lines.pop(-1)
return lines
def apply_edits(path):
remap_files = find_remap_files(path)
if not remap_files:
print "No remap files found"
return 1;
edits_set = set()
for remap_file in remap_files:
json_data = open(remap_file).read()
json_data = json_data.replace(",\n }", "\n }")
json_data = json_data.replace(",\n]", "\n]")
curr_edits = json.loads(json_data)
for ed in curr_edits:
fname = ed["file"]
offset = ed["offset"]
length = ed.get("remove", 0)
text = ed.get("text", "")
edits_set.add((fname, offset, length, text))
edits_per_file = {}
for ed in edits_set:
fname = ed[0]
if not edits_per_file.has_key(fname):
edits_per_file[fname] = []
edits_per_file[fname].append((ed[1], ed[2], ed[3]))
for fname, edits in edits_per_file.iteritems():
print 'Updating', fname
edits.sort(reverse=True)
file_data = open(fname).read()
for ed in edits:
offset = ed[0]
length = ed[1]
text = ed[2]
file_data = file_data[:offset] + text + file_data[offset+length:]
open(fname, 'w').write(file_data)
return 0
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Finds all .remap files in a directory and applies their
edits to the source files.""")
parser.add_argument("build_dir_path",
help="path to index info")
args = parser.parse_args()
return apply_edits(args.build_dir_path)
if __name__ == "__main__":
sys.exit(main())
|
Add a script for picking up all .remap files in a directory and applying the edits therein.
|
[utils] Add a script for picking up all .remap files in a directory and applying the edits therein.
Swift SVN r31773
|
Python
|
apache-2.0
|
deyton/swift,ahoppen/swift,IngmarStein/swift,MukeshKumarS/Swift,johnno1962d/swift,shajrawi/swift,harlanhaskins/swift,gmilos/swift,calebd/swift,austinzheng/swift,arvedviehweger/swift,lorentey/swift,sschiau/swift,codestergit/swift,kperryua/swift,jtbandes/swift,uasys/swift,OscarSwanros/swift,parkera/swift,xedin/swift,jtbandes/swift,jckarter/swift,tjw/swift,shahmishal/swift,tkremenek/swift,glessard/swift,amraboelela/swift,austinzheng/swift,natecook1000/swift,aschwaighofer/swift,johnno1962d/swift,swiftix/swift,manavgabhawala/swift,gribozavr/swift,bitjammer/swift,modocache/swift,sschiau/swift,SwiftAndroid/swift,russbishop/swift,sdulal/swift,atrick/swift,roambotics/swift,practicalswift/swift,JaSpa/swift,devincoughlin/swift,benlangmuir/swift,alblue/swift,devincoughlin/swift,natecook1000/swift,shahmishal/swift,frootloops/swift,frootloops/swift,natecook1000/swift,devincoughlin/swift,hooman/swift,djwbrown/swift,glessard/swift,slavapestov/swift,manavgabhawala/swift,danielmartin/swift,JGiola/swift,austinzheng/swift,SwiftAndroid/swift,IngmarStein/swift,brentdax/swift,stephentyrone/swift,gribozavr/swift,JGiola/swift,return/swift,IngmarStein/swift,dduan/swift,tkremenek/swift,nathawes/swift,amraboelela/swift,SwiftAndroid/swift,gmilos/swift,djwbrown/swift,swiftix/swift.old,codestergit/swift,russbishop/swift,jckarter/swift,frootloops/swift,nathawes/swift,LeoShimonaka/swift,swiftix/swift.old,codestergit/swift,kperryua/swift,manavgabhawala/swift,brentdax/swift,CodaFi/swift,gribozavr/swift,atrick/swift,kusl/swift,Ivacker/swift,ahoppen/swift,kusl/swift,atrick/swift,manavgabhawala/swift,nathawes/swift,practicalswift/swift,lorentey/swift,gottesmm/swift,emilstahl/swift,jopamer/swift,ahoppen/swift,karwa/swift,slavapestov/swift,MukeshKumarS/Swift,gottesmm/swift,airspeedswift/swift,karwa/swift,austinzheng/swift,nathawes/swift,gmilos/swift,natecook1000/swift,djwbrown/swift,uasys/swift,CodaFi/swift,JaSpa/swift,bitjammer/swift,kentya6/swift,mightydeveloper/swift,allevato/swift,jckarter/swift,nathawes/swift,deyton/swift,shajrawi/swift,gregomni/swift,allevato/swift,stephentyrone/swift,glessard/swift,modocache/swift,lorentey/swift,harlanhaskins/swift,xwu/swift,harlanhaskins/swift,kstaring/swift,adrfer/swift,emilstahl/swift,sdulal/swift,Jnosh/swift,Ivacker/swift,practicalswift/swift,xwu/swift,KrishMunot/swift,zisko/swift,arvedviehweger/swift,swiftix/swift.old,russbishop/swift,hooman/swift,kstaring/swift,swiftix/swift.old,sschiau/swift,therealbnut/swift,shahmishal/swift,ben-ng/swift,airspeedswift/swift,danielmartin/swift,emilstahl/swift,stephentyrone/swift,hughbe/swift,SwiftAndroid/swift,xwu/swift,shajrawi/swift,frootloops/swift,kentya6/swift,swiftix/swift,dreamsxin/swift,tinysun212/swift-windows,calebd/swift,rudkx/swift,brentdax/swift,shajrawi/swift,tardieu/swift,swiftix/swift.old,Ivacker/swift,tinysun212/swift-windows,sschiau/swift,arvedviehweger/swift,JGiola/swift,jopamer/swift,hooman/swift,alblue/swift,KrishMunot/swift,lorentey/swift,xedin/swift,tardieu/swift,airspeedswift/swift,gribozavr/swift,khizkhiz/swift,slavapestov/swift,roambotics/swift,modocache/swift,tardieu/swift,slavapestov/swift,djwbrown/swift,MukeshKumarS/Swift,IngmarStein/swift,Jnosh/swift,stephentyrone/swift,arvedviehweger/swift,allevato/swift,atrick/swift,KrishMunot/swift,deyton/swift,CodaFi/swift,kusl/swift,alblue/swift,modocache/swift,gregomni/swift,kperryua/swift,LeoShimonaka/swift,tinysun212/swift-windows,aschwaighofer/swift,OscarSwanros/swift,felix91gr/swift,nathawes/swift,tjw/swift,allevato/swift,aschwaighofer/swift,CodaFi/swift,hughbe/swift,JGiola/swift,mightydeveloper/swift,mightydeveloper/swift,kperryua/swift,kstaring/swift,kstaring/swift,uasys/swift,gottesmm/swift,kperryua/swift,arvedviehweger/swift,glessard/swift,MukeshKumarS/Swift,ken0nek/swift,modocache/swift,alblue/swift,arvedviehweger/swift,apple/swift,jmgc/swift,rudkx/swift,stephentyrone/swift,shajrawi/swift,parkera/swift,milseman/swift,alblue/swift,danielmartin/swift,swiftix/swift,sdulal/swift,tardieu/swift,parkera/swift,CodaFi/swift,deyton/swift,roambotics/swift,OscarSwanros/swift,emilstahl/swift,sdulal/swift,Ivacker/swift,SwiftAndroid/swift,jmgc/swift,codestergit/swift,rudkx/swift,ahoppen/swift,emilstahl/swift,gregomni/swift,therealbnut/swift,milseman/swift,practicalswift/swift,sschiau/swift,benlangmuir/swift,jtbandes/swift,roambotics/swift,therealbnut/swift,milseman/swift,LeoShimonaka/swift,devincoughlin/swift,gottesmm/swift,milseman/swift,dreamsxin/swift,johnno1962d/swift,milseman/swift,sdulal/swift,jckarter/swift,dduan/swift,cbrentharris/swift,alblue/swift,parkera/swift,gottesmm/swift,xwu/swift,swiftix/swift,allevato/swift,shahmishal/swift,return/swift,johnno1962d/swift,huonw/swift,codestergit/swift,arvedviehweger/swift,airspeedswift/swift,JaSpa/swift,OscarSwanros/swift,ken0nek/swift,johnno1962d/swift,cbrentharris/swift,zisko/swift,harlanhaskins/swift,hughbe/swift,CodaFi/swift,bitjammer/swift,jmgc/swift,hughbe/swift,amraboelela/swift,kentya6/swift,therealbnut/swift,devincoughlin/swift,glessard/swift,kstaring/swift,benlangmuir/swift,mightydeveloper/swift,ben-ng/swift,huonw/swift,airspeedswift/swift,adrfer/swift,tardieu/swift,Ivacker/swift,mightydeveloper/swift,jopamer/swift,ken0nek/swift,dduan/swift,swiftix/swift.old,rudkx/swift,kusl/swift,tjw/swift,sdulal/swift,kentya6/swift,hughbe/swift,Ivacker/swift,brentdax/swift,kstaring/swift,jmgc/swift,swiftix/swift,adrfer/swift,cbrentharris/swift,manavgabhawala/swift,gribozavr/swift,tkremenek/swift,russbishop/swift,hooman/swift,tkremenek/swift,felix91gr/swift,ben-ng/swift,aschwaighofer/swift,huonw/swift,KrishMunot/swift,amraboelela/swift,zisko/swift,jtbandes/swift,ken0nek/swift,danielmartin/swift,calebd/swift,return/swift,brentdax/swift,apple/swift,LeoShimonaka/swift,shahmishal/swift,felix91gr/swift,karwa/swift,aschwaighofer/swift,sschiau/swift,natecook1000/swift,LeoShimonaka/swift,karwa/swift,dduan/swift,ahoppen/swift,cbrentharris/swift,devincoughlin/swift,xedin/swift,swiftix/swift,rudkx/swift,tardieu/swift,aschwaighofer/swift,gregomni/swift,amraboelela/swift,lorentey/swift,shahmishal/swift,ben-ng/swift,johnno1962d/swift,gmilos/swift,JaSpa/swift,lorentey/swift,return/swift,khizkhiz/swift,brentdax/swift,LeoShimonaka/swift,CodaFi/swift,gribozavr/swift,uasys/swift,frootloops/swift,nathawes/swift,aschwaighofer/swift,amraboelela/swift,hooman/swift,modocache/swift,khizkhiz/swift,zisko/swift,calebd/swift,russbishop/swift,tjw/swift,sschiau/swift,parkera/swift,xedin/swift,MukeshKumarS/Swift,jopamer/swift,tinysun212/swift-windows,sdulal/swift,cbrentharris/swift,russbishop/swift,adrfer/swift,jmgc/swift,atrick/swift,jmgc/swift,cbrentharris/swift,harlanhaskins/swift,practicalswift/swift,xwu/swift,shahmishal/swift,SwiftAndroid/swift,bitjammer/swift,tjw/swift,IngmarStein/swift,stephentyrone/swift,ken0nek/swift,tkremenek/swift,uasys/swift,gregomni/swift,huonw/swift,karwa/swift,bitjammer/swift,manavgabhawala/swift,huonw/swift,swiftix/swift,therealbnut/swift,danielmartin/swift,manavgabhawala/swift,kentya6/swift,zisko/swift,mightydeveloper/swift,hooman/swift,zisko/swift,khizkhiz/swift,roambotics/swift,allevato/swift,felix91gr/swift,cbrentharris/swift,mightydeveloper/swift,deyton/swift,practicalswift/swift,kentya6/swift,Ivacker/swift,adrfer/swift,JaSpa/swift,benlangmuir/swift,Jnosh/swift,dduan/swift,austinzheng/swift,lorentey/swift,brentdax/swift,huonw/swift,jckarter/swift,practicalswift/swift,jopamer/swift,hughbe/swift,JGiola/swift,kentya6/swift,xedin/swift,slavapestov/swift,harlanhaskins/swift,lorentey/swift,djwbrown/swift,kentya6/swift,bitjammer/swift,amraboelela/swift,huonw/swift,SwiftAndroid/swift,gottesmm/swift,parkera/swift,therealbnut/swift,shahmishal/swift,khizkhiz/swift,djwbrown/swift,swiftix/swift.old,roambotics/swift,karwa/swift,gregomni/swift,alblue/swift,Jnosh/swift,codestergit/swift,milseman/swift,return/swift,allevato/swift,jtbandes/swift,slavapestov/swift,xedin/swift,kusl/swift,return/swift,emilstahl/swift,KrishMunot/swift,jtbandes/swift,shajrawi/swift,slavapestov/swift,jtbandes/swift,rudkx/swift,felix91gr/swift,ben-ng/swift,modocache/swift,calebd/swift,Jnosh/swift,airspeedswift/swift,atrick/swift,devincoughlin/swift,karwa/swift,adrfer/swift,uasys/swift,apple/swift,gmilos/swift,kperryua/swift,Jnosh/swift,gmilos/swift,xedin/swift,frootloops/swift,swiftix/swift.old,harlanhaskins/swift,sdulal/swift,gmilos/swift,gottesmm/swift,MukeshKumarS/Swift,tkremenek/swift,austinzheng/swift,tkremenek/swift,austinzheng/swift,danielmartin/swift,OscarSwanros/swift,tinysun212/swift-windows,OscarSwanros/swift,djwbrown/swift,MukeshKumarS/Swift,therealbnut/swift,jopamer/swift,uasys/swift,cbrentharris/swift,return/swift,parkera/swift,deyton/swift,airspeedswift/swift,khizkhiz/swift,apple/swift,zisko/swift,shajrawi/swift,IngmarStein/swift,ken0nek/swift,russbishop/swift,devincoughlin/swift,hooman/swift,JaSpa/swift,tjw/swift,felix91gr/swift,gribozavr/swift,mightydeveloper/swift,adrfer/swift,gribozavr/swift,IngmarStein/swift,xedin/swift,kstaring/swift,jmgc/swift,kperryua/swift,frootloops/swift,tardieu/swift,ken0nek/swift,jckarter/swift,stephentyrone/swift,benlangmuir/swift,apple/swift,tjw/swift,shajrawi/swift,emilstahl/swift,johnno1962d/swift,jopamer/swift,milseman/swift,natecook1000/swift,bitjammer/swift,practicalswift/swift,glessard/swift,KrishMunot/swift,danielmartin/swift,emilstahl/swift,hughbe/swift,felix91gr/swift,JGiola/swift,tinysun212/swift-windows,sschiau/swift,Ivacker/swift,benlangmuir/swift,codestergit/swift,OscarSwanros/swift,JaSpa/swift,dduan/swift,kusl/swift,ben-ng/swift,tinysun212/swift-windows,LeoShimonaka/swift,karwa/swift,kusl/swift,khizkhiz/swift,calebd/swift,ben-ng/swift,natecook1000/swift,Jnosh/swift,apple/swift,KrishMunot/swift,deyton/swift,xwu/swift,kusl/swift,parkera/swift,xwu/swift,calebd/swift,dduan/swift,LeoShimonaka/swift,jckarter/swift,ahoppen/swift
|
[utils] Add a script for picking up all .remap files in a directory and applying the edits therein.
Swift SVN r31773
|
#!/usr/bin/env python
#===--- apply-fixit-edits.py - Tool for applying edits from .remap files ---===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
#===------------------------------------------------------------------------===#
import subprocess
import json
import argparse
import sys
def find_remap_files(path):
out = None
try:
out = subprocess.check_output(["find", path, "-name", "*.remap"])
except subprocess.CalledProcessError:
return None
lines = out.split('\n')
lines.pop(-1)
return lines
def apply_edits(path):
remap_files = find_remap_files(path)
if not remap_files:
print "No remap files found"
return 1;
edits_set = set()
for remap_file in remap_files:
json_data = open(remap_file).read()
json_data = json_data.replace(",\n }", "\n }")
json_data = json_data.replace(",\n]", "\n]")
curr_edits = json.loads(json_data)
for ed in curr_edits:
fname = ed["file"]
offset = ed["offset"]
length = ed.get("remove", 0)
text = ed.get("text", "")
edits_set.add((fname, offset, length, text))
edits_per_file = {}
for ed in edits_set:
fname = ed[0]
if not edits_per_file.has_key(fname):
edits_per_file[fname] = []
edits_per_file[fname].append((ed[1], ed[2], ed[3]))
for fname, edits in edits_per_file.iteritems():
print 'Updating', fname
edits.sort(reverse=True)
file_data = open(fname).read()
for ed in edits:
offset = ed[0]
length = ed[1]
text = ed[2]
file_data = file_data[:offset] + text + file_data[offset+length:]
open(fname, 'w').write(file_data)
return 0
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Finds all .remap files in a directory and applies their
edits to the source files.""")
parser.add_argument("build_dir_path",
help="path to index info")
args = parser.parse_args()
return apply_edits(args.build_dir_path)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>[utils] Add a script for picking up all .remap files in a directory and applying the edits therein.
Swift SVN r31773<commit_after>
|
#!/usr/bin/env python
#===--- apply-fixit-edits.py - Tool for applying edits from .remap files ---===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
#===------------------------------------------------------------------------===#
import subprocess
import json
import argparse
import sys
def find_remap_files(path):
out = None
try:
out = subprocess.check_output(["find", path, "-name", "*.remap"])
except subprocess.CalledProcessError:
return None
lines = out.split('\n')
lines.pop(-1)
return lines
def apply_edits(path):
remap_files = find_remap_files(path)
if not remap_files:
print "No remap files found"
return 1;
edits_set = set()
for remap_file in remap_files:
json_data = open(remap_file).read()
json_data = json_data.replace(",\n }", "\n }")
json_data = json_data.replace(",\n]", "\n]")
curr_edits = json.loads(json_data)
for ed in curr_edits:
fname = ed["file"]
offset = ed["offset"]
length = ed.get("remove", 0)
text = ed.get("text", "")
edits_set.add((fname, offset, length, text))
edits_per_file = {}
for ed in edits_set:
fname = ed[0]
if not edits_per_file.has_key(fname):
edits_per_file[fname] = []
edits_per_file[fname].append((ed[1], ed[2], ed[3]))
for fname, edits in edits_per_file.iteritems():
print 'Updating', fname
edits.sort(reverse=True)
file_data = open(fname).read()
for ed in edits:
offset = ed[0]
length = ed[1]
text = ed[2]
file_data = file_data[:offset] + text + file_data[offset+length:]
open(fname, 'w').write(file_data)
return 0
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Finds all .remap files in a directory and applies their
edits to the source files.""")
parser.add_argument("build_dir_path",
help="path to index info")
args = parser.parse_args()
return apply_edits(args.build_dir_path)
if __name__ == "__main__":
sys.exit(main())
|
[utils] Add a script for picking up all .remap files in a directory and applying the edits therein.
Swift SVN r31773#!/usr/bin/env python
#===--- apply-fixit-edits.py - Tool for applying edits from .remap files ---===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
#===------------------------------------------------------------------------===#
import subprocess
import json
import argparse
import sys
def find_remap_files(path):
out = None
try:
out = subprocess.check_output(["find", path, "-name", "*.remap"])
except subprocess.CalledProcessError:
return None
lines = out.split('\n')
lines.pop(-1)
return lines
def apply_edits(path):
remap_files = find_remap_files(path)
if not remap_files:
print "No remap files found"
return 1;
edits_set = set()
for remap_file in remap_files:
json_data = open(remap_file).read()
json_data = json_data.replace(",\n }", "\n }")
json_data = json_data.replace(",\n]", "\n]")
curr_edits = json.loads(json_data)
for ed in curr_edits:
fname = ed["file"]
offset = ed["offset"]
length = ed.get("remove", 0)
text = ed.get("text", "")
edits_set.add((fname, offset, length, text))
edits_per_file = {}
for ed in edits_set:
fname = ed[0]
if not edits_per_file.has_key(fname):
edits_per_file[fname] = []
edits_per_file[fname].append((ed[1], ed[2], ed[3]))
for fname, edits in edits_per_file.iteritems():
print 'Updating', fname
edits.sort(reverse=True)
file_data = open(fname).read()
for ed in edits:
offset = ed[0]
length = ed[1]
text = ed[2]
file_data = file_data[:offset] + text + file_data[offset+length:]
open(fname, 'w').write(file_data)
return 0
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Finds all .remap files in a directory and applies their
edits to the source files.""")
parser.add_argument("build_dir_path",
help="path to index info")
args = parser.parse_args()
return apply_edits(args.build_dir_path)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>[utils] Add a script for picking up all .remap files in a directory and applying the edits therein.
Swift SVN r31773<commit_after>#!/usr/bin/env python
#===--- apply-fixit-edits.py - Tool for applying edits from .remap files ---===#
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
#===------------------------------------------------------------------------===#
import subprocess
import json
import argparse
import sys
def find_remap_files(path):
out = None
try:
out = subprocess.check_output(["find", path, "-name", "*.remap"])
except subprocess.CalledProcessError:
return None
lines = out.split('\n')
lines.pop(-1)
return lines
def apply_edits(path):
remap_files = find_remap_files(path)
if not remap_files:
print "No remap files found"
return 1;
edits_set = set()
for remap_file in remap_files:
json_data = open(remap_file).read()
json_data = json_data.replace(",\n }", "\n }")
json_data = json_data.replace(",\n]", "\n]")
curr_edits = json.loads(json_data)
for ed in curr_edits:
fname = ed["file"]
offset = ed["offset"]
length = ed.get("remove", 0)
text = ed.get("text", "")
edits_set.add((fname, offset, length, text))
edits_per_file = {}
for ed in edits_set:
fname = ed[0]
if not edits_per_file.has_key(fname):
edits_per_file[fname] = []
edits_per_file[fname].append((ed[1], ed[2], ed[3]))
for fname, edits in edits_per_file.iteritems():
print 'Updating', fname
edits.sort(reverse=True)
file_data = open(fname).read()
for ed in edits:
offset = ed[0]
length = ed[1]
text = ed[2]
file_data = file_data[:offset] + text + file_data[offset+length:]
open(fname, 'w').write(file_data)
return 0
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Finds all .remap files in a directory and applies their
edits to the source files.""")
parser.add_argument("build_dir_path",
help="path to index info")
args = parser.parse_args()
return apply_edits(args.build_dir_path)
if __name__ == "__main__":
sys.exit(main())
|
|
56ce529d115acfa9d7ae7d1cba574c26f68cb955
|
py/diameter-of-binary-tree.py
|
py/diameter-of-binary-tree.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def dfs(self, cur):
if cur:
ldepth = self.dfs(cur.left)
rdepth = self.dfs(cur.right)
self.m = max(self.m, ldepth + rdepth)
return max(ldepth, rdepth) + 1
else:
return 0
def diameterOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.m = 0
self.dfs(root)
return self.m
|
Add py solution for 543. Diameter of Binary Tree
|
Add py solution for 543. Diameter of Binary Tree
543. Diameter of Binary Tree: https://leetcode.com/problems/diameter-of-binary-tree/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 543. Diameter of Binary Tree
543. Diameter of Binary Tree: https://leetcode.com/problems/diameter-of-binary-tree/
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def dfs(self, cur):
if cur:
ldepth = self.dfs(cur.left)
rdepth = self.dfs(cur.right)
self.m = max(self.m, ldepth + rdepth)
return max(ldepth, rdepth) + 1
else:
return 0
def diameterOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.m = 0
self.dfs(root)
return self.m
|
<commit_before><commit_msg>Add py solution for 543. Diameter of Binary Tree
543. Diameter of Binary Tree: https://leetcode.com/problems/diameter-of-binary-tree/<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def dfs(self, cur):
if cur:
ldepth = self.dfs(cur.left)
rdepth = self.dfs(cur.right)
self.m = max(self.m, ldepth + rdepth)
return max(ldepth, rdepth) + 1
else:
return 0
def diameterOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.m = 0
self.dfs(root)
return self.m
|
Add py solution for 543. Diameter of Binary Tree
543. Diameter of Binary Tree: https://leetcode.com/problems/diameter-of-binary-tree/# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def dfs(self, cur):
if cur:
ldepth = self.dfs(cur.left)
rdepth = self.dfs(cur.right)
self.m = max(self.m, ldepth + rdepth)
return max(ldepth, rdepth) + 1
else:
return 0
def diameterOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.m = 0
self.dfs(root)
return self.m
|
<commit_before><commit_msg>Add py solution for 543. Diameter of Binary Tree
543. Diameter of Binary Tree: https://leetcode.com/problems/diameter-of-binary-tree/<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def dfs(self, cur):
if cur:
ldepth = self.dfs(cur.left)
rdepth = self.dfs(cur.right)
self.m = max(self.m, ldepth + rdepth)
return max(ldepth, rdepth) + 1
else:
return 0
def diameterOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.m = 0
self.dfs(root)
return self.m
|
|
f01ce359618e66163f280eb386d70e1350addbc6
|
py/top-k-frequent-elements.py
|
py/top-k-frequent-elements.py
|
import heapq
from operator import itemgetter
from collections import Counter
class Solution(object):
def topKFrequent(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
c = Counter(nums)
return map(itemgetter(1), heapq.nlargest(k, [(cnt, n) for n, cnt in c.iteritems()]))
|
Add py solution for 347. Top K Frequent Elements
|
Add py solution for 347. Top K Frequent Elements
347. Top K Frequent Elements: https://leetcode.com/problems/top-k-frequent-elements/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 347. Top K Frequent Elements
347. Top K Frequent Elements: https://leetcode.com/problems/top-k-frequent-elements/
|
import heapq
from operator import itemgetter
from collections import Counter
class Solution(object):
def topKFrequent(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
c = Counter(nums)
return map(itemgetter(1), heapq.nlargest(k, [(cnt, n) for n, cnt in c.iteritems()]))
|
<commit_before><commit_msg>Add py solution for 347. Top K Frequent Elements
347. Top K Frequent Elements: https://leetcode.com/problems/top-k-frequent-elements/<commit_after>
|
import heapq
from operator import itemgetter
from collections import Counter
class Solution(object):
def topKFrequent(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
c = Counter(nums)
return map(itemgetter(1), heapq.nlargest(k, [(cnt, n) for n, cnt in c.iteritems()]))
|
Add py solution for 347. Top K Frequent Elements
347. Top K Frequent Elements: https://leetcode.com/problems/top-k-frequent-elements/import heapq
from operator import itemgetter
from collections import Counter
class Solution(object):
def topKFrequent(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
c = Counter(nums)
return map(itemgetter(1), heapq.nlargest(k, [(cnt, n) for n, cnt in c.iteritems()]))
|
<commit_before><commit_msg>Add py solution for 347. Top K Frequent Elements
347. Top K Frequent Elements: https://leetcode.com/problems/top-k-frequent-elements/<commit_after>import heapq
from operator import itemgetter
from collections import Counter
class Solution(object):
def topKFrequent(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
c = Counter(nums)
return map(itemgetter(1), heapq.nlargest(k, [(cnt, n) for n, cnt in c.iteritems()]))
|
|
eb84efd25b8c5e9ba06434ad78c9611b3f8350f0
|
every_election/apps/elections/management/commands/add_tags.py
|
every_election/apps/elections/management/commands/add_tags.py
|
import json
from django.contrib.gis.gdal import DataSource
from django.core.management import BaseCommand
from django.db.models import Value
from core.mixins import ReadFromFileMixin
from core.models import JsonbSet
from elections.models import Election
def get_layer(data, layer_index=0):
data_source = DataSource(data.name)
if len(data_source) < layer_index + 1:
raise ValueError(f"Expected layer at index: {layer_index}, None found")
return data_source[layer_index]
class Command(ReadFromFileMixin, BaseCommand):
help = """
Update tags field on Election model.
Example usage:
python manage.py add_tags -u 'https://opendata.arcgis.com/datasets/01fd6b2d7600446d8af768005992f76a_2.geojson' --fields '{"NUTS118NM": "value", "NUTS118CD": "key"}' --tag-name NUTS1
"""
# TODO: add a flag to make overwriting optional - or at least warn about it.
# TODO: add some way of filtering which elections to apply it too.
# TODO: make it possible to get a layer by name.
def add_arguments(self, parser):
parser.add_argument(
"--fields",
action="store",
dest="field_map",
help="A map of fields in the form: {'field name in source':'name to store'}",
required=True,
)
parser.add_argument(
"--tag-name",
action="store",
dest="tag_name",
help="Name of tag to store",
required=True,
)
parser.add_argument(
"--layer-index",
action="store",
default=0,
type=int,
dest="layer_index",
help="Index of layer in dataset",
)
super().add_arguments(parser)
def handle(self, *args, **options):
field_map = json.loads(options["field_map"])
tag_name = options["tag_name"]
self.stdout.write("Loading data...")
data = self.load_data(options)
self.stdout.write("...data loaded.")
layer = get_layer(data, options["layer_index"])
self.stdout.write(f"Reading data from {layer.name}")
for feature in layer:
tags = {}
for field in field_map:
tags[field_map[field]] = feature.get(field)
self.stdout.write(f"Setting tags: {tag_name} to {tags}...")
ballots = Election.private_objects.ballots_with_point_in_area(
feature.geom.geos
)
self.stdout.write(f"...for {len(ballots)} ballots...")
ballots.update(
tags=JsonbSet(
"tags", Value(f"{{{tag_name}}}"), Value(json.dumps(tags)), True
)
)
self.stdout.write("...done.")
|
Add command to add tags to Election model
|
Add command to add tags to Election model
|
Python
|
bsd-3-clause
|
DemocracyClub/EveryElection,DemocracyClub/EveryElection,DemocracyClub/EveryElection
|
Add command to add tags to Election model
|
import json
from django.contrib.gis.gdal import DataSource
from django.core.management import BaseCommand
from django.db.models import Value
from core.mixins import ReadFromFileMixin
from core.models import JsonbSet
from elections.models import Election
def get_layer(data, layer_index=0):
data_source = DataSource(data.name)
if len(data_source) < layer_index + 1:
raise ValueError(f"Expected layer at index: {layer_index}, None found")
return data_source[layer_index]
class Command(ReadFromFileMixin, BaseCommand):
help = """
Update tags field on Election model.
Example usage:
python manage.py add_tags -u 'https://opendata.arcgis.com/datasets/01fd6b2d7600446d8af768005992f76a_2.geojson' --fields '{"NUTS118NM": "value", "NUTS118CD": "key"}' --tag-name NUTS1
"""
# TODO: add a flag to make overwriting optional - or at least warn about it.
# TODO: add some way of filtering which elections to apply it too.
# TODO: make it possible to get a layer by name.
def add_arguments(self, parser):
parser.add_argument(
"--fields",
action="store",
dest="field_map",
help="A map of fields in the form: {'field name in source':'name to store'}",
required=True,
)
parser.add_argument(
"--tag-name",
action="store",
dest="tag_name",
help="Name of tag to store",
required=True,
)
parser.add_argument(
"--layer-index",
action="store",
default=0,
type=int,
dest="layer_index",
help="Index of layer in dataset",
)
super().add_arguments(parser)
def handle(self, *args, **options):
field_map = json.loads(options["field_map"])
tag_name = options["tag_name"]
self.stdout.write("Loading data...")
data = self.load_data(options)
self.stdout.write("...data loaded.")
layer = get_layer(data, options["layer_index"])
self.stdout.write(f"Reading data from {layer.name}")
for feature in layer:
tags = {}
for field in field_map:
tags[field_map[field]] = feature.get(field)
self.stdout.write(f"Setting tags: {tag_name} to {tags}...")
ballots = Election.private_objects.ballots_with_point_in_area(
feature.geom.geos
)
self.stdout.write(f"...for {len(ballots)} ballots...")
ballots.update(
tags=JsonbSet(
"tags", Value(f"{{{tag_name}}}"), Value(json.dumps(tags)), True
)
)
self.stdout.write("...done.")
|
<commit_before><commit_msg>Add command to add tags to Election model<commit_after>
|
import json
from django.contrib.gis.gdal import DataSource
from django.core.management import BaseCommand
from django.db.models import Value
from core.mixins import ReadFromFileMixin
from core.models import JsonbSet
from elections.models import Election
def get_layer(data, layer_index=0):
data_source = DataSource(data.name)
if len(data_source) < layer_index + 1:
raise ValueError(f"Expected layer at index: {layer_index}, None found")
return data_source[layer_index]
class Command(ReadFromFileMixin, BaseCommand):
help = """
Update tags field on Election model.
Example usage:
python manage.py add_tags -u 'https://opendata.arcgis.com/datasets/01fd6b2d7600446d8af768005992f76a_2.geojson' --fields '{"NUTS118NM": "value", "NUTS118CD": "key"}' --tag-name NUTS1
"""
# TODO: add a flag to make overwriting optional - or at least warn about it.
# TODO: add some way of filtering which elections to apply it too.
# TODO: make it possible to get a layer by name.
def add_arguments(self, parser):
parser.add_argument(
"--fields",
action="store",
dest="field_map",
help="A map of fields in the form: {'field name in source':'name to store'}",
required=True,
)
parser.add_argument(
"--tag-name",
action="store",
dest="tag_name",
help="Name of tag to store",
required=True,
)
parser.add_argument(
"--layer-index",
action="store",
default=0,
type=int,
dest="layer_index",
help="Index of layer in dataset",
)
super().add_arguments(parser)
def handle(self, *args, **options):
field_map = json.loads(options["field_map"])
tag_name = options["tag_name"]
self.stdout.write("Loading data...")
data = self.load_data(options)
self.stdout.write("...data loaded.")
layer = get_layer(data, options["layer_index"])
self.stdout.write(f"Reading data from {layer.name}")
for feature in layer:
tags = {}
for field in field_map:
tags[field_map[field]] = feature.get(field)
self.stdout.write(f"Setting tags: {tag_name} to {tags}...")
ballots = Election.private_objects.ballots_with_point_in_area(
feature.geom.geos
)
self.stdout.write(f"...for {len(ballots)} ballots...")
ballots.update(
tags=JsonbSet(
"tags", Value(f"{{{tag_name}}}"), Value(json.dumps(tags)), True
)
)
self.stdout.write("...done.")
|
Add command to add tags to Election modelimport json
from django.contrib.gis.gdal import DataSource
from django.core.management import BaseCommand
from django.db.models import Value
from core.mixins import ReadFromFileMixin
from core.models import JsonbSet
from elections.models import Election
def get_layer(data, layer_index=0):
data_source = DataSource(data.name)
if len(data_source) < layer_index + 1:
raise ValueError(f"Expected layer at index: {layer_index}, None found")
return data_source[layer_index]
class Command(ReadFromFileMixin, BaseCommand):
help = """
Update tags field on Election model.
Example usage:
python manage.py add_tags -u 'https://opendata.arcgis.com/datasets/01fd6b2d7600446d8af768005992f76a_2.geojson' --fields '{"NUTS118NM": "value", "NUTS118CD": "key"}' --tag-name NUTS1
"""
# TODO: add a flag to make overwriting optional - or at least warn about it.
# TODO: add some way of filtering which elections to apply it too.
# TODO: make it possible to get a layer by name.
def add_arguments(self, parser):
parser.add_argument(
"--fields",
action="store",
dest="field_map",
help="A map of fields in the form: {'field name in source':'name to store'}",
required=True,
)
parser.add_argument(
"--tag-name",
action="store",
dest="tag_name",
help="Name of tag to store",
required=True,
)
parser.add_argument(
"--layer-index",
action="store",
default=0,
type=int,
dest="layer_index",
help="Index of layer in dataset",
)
super().add_arguments(parser)
def handle(self, *args, **options):
field_map = json.loads(options["field_map"])
tag_name = options["tag_name"]
self.stdout.write("Loading data...")
data = self.load_data(options)
self.stdout.write("...data loaded.")
layer = get_layer(data, options["layer_index"])
self.stdout.write(f"Reading data from {layer.name}")
for feature in layer:
tags = {}
for field in field_map:
tags[field_map[field]] = feature.get(field)
self.stdout.write(f"Setting tags: {tag_name} to {tags}...")
ballots = Election.private_objects.ballots_with_point_in_area(
feature.geom.geos
)
self.stdout.write(f"...for {len(ballots)} ballots...")
ballots.update(
tags=JsonbSet(
"tags", Value(f"{{{tag_name}}}"), Value(json.dumps(tags)), True
)
)
self.stdout.write("...done.")
|
<commit_before><commit_msg>Add command to add tags to Election model<commit_after>import json
from django.contrib.gis.gdal import DataSource
from django.core.management import BaseCommand
from django.db.models import Value
from core.mixins import ReadFromFileMixin
from core.models import JsonbSet
from elections.models import Election
def get_layer(data, layer_index=0):
data_source = DataSource(data.name)
if len(data_source) < layer_index + 1:
raise ValueError(f"Expected layer at index: {layer_index}, None found")
return data_source[layer_index]
class Command(ReadFromFileMixin, BaseCommand):
help = """
Update tags field on Election model.
Example usage:
python manage.py add_tags -u 'https://opendata.arcgis.com/datasets/01fd6b2d7600446d8af768005992f76a_2.geojson' --fields '{"NUTS118NM": "value", "NUTS118CD": "key"}' --tag-name NUTS1
"""
# TODO: add a flag to make overwriting optional - or at least warn about it.
# TODO: add some way of filtering which elections to apply it too.
# TODO: make it possible to get a layer by name.
def add_arguments(self, parser):
parser.add_argument(
"--fields",
action="store",
dest="field_map",
help="A map of fields in the form: {'field name in source':'name to store'}",
required=True,
)
parser.add_argument(
"--tag-name",
action="store",
dest="tag_name",
help="Name of tag to store",
required=True,
)
parser.add_argument(
"--layer-index",
action="store",
default=0,
type=int,
dest="layer_index",
help="Index of layer in dataset",
)
super().add_arguments(parser)
def handle(self, *args, **options):
field_map = json.loads(options["field_map"])
tag_name = options["tag_name"]
self.stdout.write("Loading data...")
data = self.load_data(options)
self.stdout.write("...data loaded.")
layer = get_layer(data, options["layer_index"])
self.stdout.write(f"Reading data from {layer.name}")
for feature in layer:
tags = {}
for field in field_map:
tags[field_map[field]] = feature.get(field)
self.stdout.write(f"Setting tags: {tag_name} to {tags}...")
ballots = Election.private_objects.ballots_with_point_in_area(
feature.geom.geos
)
self.stdout.write(f"...for {len(ballots)} ballots...")
ballots.update(
tags=JsonbSet(
"tags", Value(f"{{{tag_name}}}"), Value(json.dumps(tags)), True
)
)
self.stdout.write("...done.")
|
|
cac06b93f257afa2760180074a978cffdc05c8c2
|
examples/kiwilist.py
|
examples/kiwilist.py
|
import gtk
from kiwi.ui.widgets.list import Column, List, SequentialColumn
class Person:
"""The parameters need to be of the same name of the column headers"""
def __init__(self, name, age, city, present):
(self.name, self.age,
self.city, self.present) = name, age, city, present
def __repr__(self):
return '<Person %s>' % self.name
class MyColumn(Column):
pass
def format_func(age):
if age % 2 == 0:
return float(age)
return age
columns = [
SequentialColumn(),
MyColumn('name', tooltip='What about a stupid tooltip?', editable=True),
Column('age', format_func=format_func, editable=True),
Column('city', visible=True, sorted=True),
]
data = (Person('Evandro', 23, 'Belo Horizonte', True),
Person('Daniel', 22, 'Sao Carlos', False),
Person('Henrique', 21, 'Sao Carlos', True),
Person('Gustavo', 23, 'San Jose do Santos', False),
Person('Johan', 23, 'Goteborg', True),
Person('Lorenzo', 26, 'Granada', False)
)
win = gtk.Window()
win.set_default_size(300, 150)
win.connect('destroy', gtk.main_quit)
l = List(columns, data)
l.add_list([Person('Nando', 29+len(l), 'Santos', True)], False)
# add an extra person
win.add(l)
win.show_all()
gtk.main()
|
Add a small kiwi list example
|
Add a small kiwi list example
|
Python
|
lgpl-2.1
|
stoq/kiwi
|
Add a small kiwi list example
|
import gtk
from kiwi.ui.widgets.list import Column, List, SequentialColumn
class Person:
"""The parameters need to be of the same name of the column headers"""
def __init__(self, name, age, city, present):
(self.name, self.age,
self.city, self.present) = name, age, city, present
def __repr__(self):
return '<Person %s>' % self.name
class MyColumn(Column):
pass
def format_func(age):
if age % 2 == 0:
return float(age)
return age
columns = [
SequentialColumn(),
MyColumn('name', tooltip='What about a stupid tooltip?', editable=True),
Column('age', format_func=format_func, editable=True),
Column('city', visible=True, sorted=True),
]
data = (Person('Evandro', 23, 'Belo Horizonte', True),
Person('Daniel', 22, 'Sao Carlos', False),
Person('Henrique', 21, 'Sao Carlos', True),
Person('Gustavo', 23, 'San Jose do Santos', False),
Person('Johan', 23, 'Goteborg', True),
Person('Lorenzo', 26, 'Granada', False)
)
win = gtk.Window()
win.set_default_size(300, 150)
win.connect('destroy', gtk.main_quit)
l = List(columns, data)
l.add_list([Person('Nando', 29+len(l), 'Santos', True)], False)
# add an extra person
win.add(l)
win.show_all()
gtk.main()
|
<commit_before><commit_msg>Add a small kiwi list example<commit_after>
|
import gtk
from kiwi.ui.widgets.list import Column, List, SequentialColumn
class Person:
"""The parameters need to be of the same name of the column headers"""
def __init__(self, name, age, city, present):
(self.name, self.age,
self.city, self.present) = name, age, city, present
def __repr__(self):
return '<Person %s>' % self.name
class MyColumn(Column):
pass
def format_func(age):
if age % 2 == 0:
return float(age)
return age
columns = [
SequentialColumn(),
MyColumn('name', tooltip='What about a stupid tooltip?', editable=True),
Column('age', format_func=format_func, editable=True),
Column('city', visible=True, sorted=True),
]
data = (Person('Evandro', 23, 'Belo Horizonte', True),
Person('Daniel', 22, 'Sao Carlos', False),
Person('Henrique', 21, 'Sao Carlos', True),
Person('Gustavo', 23, 'San Jose do Santos', False),
Person('Johan', 23, 'Goteborg', True),
Person('Lorenzo', 26, 'Granada', False)
)
win = gtk.Window()
win.set_default_size(300, 150)
win.connect('destroy', gtk.main_quit)
l = List(columns, data)
l.add_list([Person('Nando', 29+len(l), 'Santos', True)], False)
# add an extra person
win.add(l)
win.show_all()
gtk.main()
|
Add a small kiwi list exampleimport gtk
from kiwi.ui.widgets.list import Column, List, SequentialColumn
class Person:
"""The parameters need to be of the same name of the column headers"""
def __init__(self, name, age, city, present):
(self.name, self.age,
self.city, self.present) = name, age, city, present
def __repr__(self):
return '<Person %s>' % self.name
class MyColumn(Column):
pass
def format_func(age):
if age % 2 == 0:
return float(age)
return age
columns = [
SequentialColumn(),
MyColumn('name', tooltip='What about a stupid tooltip?', editable=True),
Column('age', format_func=format_func, editable=True),
Column('city', visible=True, sorted=True),
]
data = (Person('Evandro', 23, 'Belo Horizonte', True),
Person('Daniel', 22, 'Sao Carlos', False),
Person('Henrique', 21, 'Sao Carlos', True),
Person('Gustavo', 23, 'San Jose do Santos', False),
Person('Johan', 23, 'Goteborg', True),
Person('Lorenzo', 26, 'Granada', False)
)
win = gtk.Window()
win.set_default_size(300, 150)
win.connect('destroy', gtk.main_quit)
l = List(columns, data)
l.add_list([Person('Nando', 29+len(l), 'Santos', True)], False)
# add an extra person
win.add(l)
win.show_all()
gtk.main()
|
<commit_before><commit_msg>Add a small kiwi list example<commit_after>import gtk
from kiwi.ui.widgets.list import Column, List, SequentialColumn
class Person:
"""The parameters need to be of the same name of the column headers"""
def __init__(self, name, age, city, present):
(self.name, self.age,
self.city, self.present) = name, age, city, present
def __repr__(self):
return '<Person %s>' % self.name
class MyColumn(Column):
pass
def format_func(age):
if age % 2 == 0:
return float(age)
return age
columns = [
SequentialColumn(),
MyColumn('name', tooltip='What about a stupid tooltip?', editable=True),
Column('age', format_func=format_func, editable=True),
Column('city', visible=True, sorted=True),
]
data = (Person('Evandro', 23, 'Belo Horizonte', True),
Person('Daniel', 22, 'Sao Carlos', False),
Person('Henrique', 21, 'Sao Carlos', True),
Person('Gustavo', 23, 'San Jose do Santos', False),
Person('Johan', 23, 'Goteborg', True),
Person('Lorenzo', 26, 'Granada', False)
)
win = gtk.Window()
win.set_default_size(300, 150)
win.connect('destroy', gtk.main_quit)
l = List(columns, data)
l.add_list([Person('Nando', 29+len(l), 'Santos', True)], False)
# add an extra person
win.add(l)
win.show_all()
gtk.main()
|
|
dae41df9835a83a71b71b5e4b64561761c404bf5
|
mp3-formatter/url_scrape_div.py
|
mp3-formatter/url_scrape_div.py
|
#!/usr/bin/python3
# sudo apt-get install python3-pip
# pip3 install requests
import lxml.html
import requests
import sys
def validate_url(url):
if not url:
raise SystemError("validate_url() was given an empty URL")
protocol = "http://"
protocol_error_message = ValueError("A URL beginning with " \
"'http://' is required")
if len(url) < len(protocol):
raise protocol_error_message
if url[:len(protocol)] != protocol:
raise protocol_error_message
def scrape_div(url, div_id):
div_id_lookup_string = '//div[contains(@id, "' + div_id + '")]'
try:
html_page = requests.get(url)
except:
e = sys.exc_info()[0]
raise ValueError("Request could not be completed. Perhaps the " \
"URL provided was invalid?")
html_page.raise_for_status()
html_tree = lxml.html.fromstring(html_page.content)
content = html_tree.xpath(div_id_lookup_string)
if len(content) < 1:
raise LookupError("The requested div could not be found")
elif len(content) > 1:
raise LookupError("More than one of the requested divs were found")
return str(content[0].text_content())
# A line is determined to be the name of a track if it begins with a number
def extract_tracklist_begin_num(content):
tracklist = []
for line in content.splitlines():
# Empty line
if not line:
continue
# Strip leading and trailing whitespace
line.lstrip()
line.rstrip()
if line[0].isdigit():
tracklist.append(line)
return tracklist
# Removes leading numbers and whitespace
def strip_leading_index(tracklist):
tracklist_new = []
for track in tracklist:
for i in range(len(track)):
if track[i].isdigit() or track[i] == " ":
i += 1
else:
tracklist_new.append(track[i:])
tracklist[tracklist.index(track)] = track[i:]
break
if len(sys.argv) < 2:
raise RuntimeError("Please provide the URL to the page with "\
"the target tracklist")
url = sys.argv[1] # sys.argv[0] is the name of this script
validate_url(url)
div_id = "stcpDiv"
content = scrape_div(url, div_id)
tracklist = extract_tracklist_begin_num(content)
strip_leading_index(tracklist)
for track in tracklist:
print(track)
|
Add script to scrape tracklist from URL
|
MP3: Add script to scrape tracklist from URL
I didn't do this in separate commits because I had such a rough time
extracting the div I needed from the page.
|
Python
|
mit
|
jleung51/scripts,jleung51/scripts,jleung51/scripts
|
MP3: Add script to scrape tracklist from URL
I didn't do this in separate commits because I had such a rough time
extracting the div I needed from the page.
|
#!/usr/bin/python3
# sudo apt-get install python3-pip
# pip3 install requests
import lxml.html
import requests
import sys
def validate_url(url):
if not url:
raise SystemError("validate_url() was given an empty URL")
protocol = "http://"
protocol_error_message = ValueError("A URL beginning with " \
"'http://' is required")
if len(url) < len(protocol):
raise protocol_error_message
if url[:len(protocol)] != protocol:
raise protocol_error_message
def scrape_div(url, div_id):
div_id_lookup_string = '//div[contains(@id, "' + div_id + '")]'
try:
html_page = requests.get(url)
except:
e = sys.exc_info()[0]
raise ValueError("Request could not be completed. Perhaps the " \
"URL provided was invalid?")
html_page.raise_for_status()
html_tree = lxml.html.fromstring(html_page.content)
content = html_tree.xpath(div_id_lookup_string)
if len(content) < 1:
raise LookupError("The requested div could not be found")
elif len(content) > 1:
raise LookupError("More than one of the requested divs were found")
return str(content[0].text_content())
# A line is determined to be the name of a track if it begins with a number
def extract_tracklist_begin_num(content):
tracklist = []
for line in content.splitlines():
# Empty line
if not line:
continue
# Strip leading and trailing whitespace
line.lstrip()
line.rstrip()
if line[0].isdigit():
tracklist.append(line)
return tracklist
# Removes leading numbers and whitespace
def strip_leading_index(tracklist):
tracklist_new = []
for track in tracklist:
for i in range(len(track)):
if track[i].isdigit() or track[i] == " ":
i += 1
else:
tracklist_new.append(track[i:])
tracklist[tracklist.index(track)] = track[i:]
break
if len(sys.argv) < 2:
raise RuntimeError("Please provide the URL to the page with "\
"the target tracklist")
url = sys.argv[1] # sys.argv[0] is the name of this script
validate_url(url)
div_id = "stcpDiv"
content = scrape_div(url, div_id)
tracklist = extract_tracklist_begin_num(content)
strip_leading_index(tracklist)
for track in tracklist:
print(track)
|
<commit_before><commit_msg>MP3: Add script to scrape tracklist from URL
I didn't do this in separate commits because I had such a rough time
extracting the div I needed from the page.<commit_after>
|
#!/usr/bin/python3
# sudo apt-get install python3-pip
# pip3 install requests
import lxml.html
import requests
import sys
def validate_url(url):
if not url:
raise SystemError("validate_url() was given an empty URL")
protocol = "http://"
protocol_error_message = ValueError("A URL beginning with " \
"'http://' is required")
if len(url) < len(protocol):
raise protocol_error_message
if url[:len(protocol)] != protocol:
raise protocol_error_message
def scrape_div(url, div_id):
div_id_lookup_string = '//div[contains(@id, "' + div_id + '")]'
try:
html_page = requests.get(url)
except:
e = sys.exc_info()[0]
raise ValueError("Request could not be completed. Perhaps the " \
"URL provided was invalid?")
html_page.raise_for_status()
html_tree = lxml.html.fromstring(html_page.content)
content = html_tree.xpath(div_id_lookup_string)
if len(content) < 1:
raise LookupError("The requested div could not be found")
elif len(content) > 1:
raise LookupError("More than one of the requested divs were found")
return str(content[0].text_content())
# A line is determined to be the name of a track if it begins with a number
def extract_tracklist_begin_num(content):
tracklist = []
for line in content.splitlines():
# Empty line
if not line:
continue
# Strip leading and trailing whitespace
line.lstrip()
line.rstrip()
if line[0].isdigit():
tracklist.append(line)
return tracklist
# Removes leading numbers and whitespace
def strip_leading_index(tracklist):
tracklist_new = []
for track in tracklist:
for i in range(len(track)):
if track[i].isdigit() or track[i] == " ":
i += 1
else:
tracklist_new.append(track[i:])
tracklist[tracklist.index(track)] = track[i:]
break
if len(sys.argv) < 2:
raise RuntimeError("Please provide the URL to the page with "\
"the target tracklist")
url = sys.argv[1] # sys.argv[0] is the name of this script
validate_url(url)
div_id = "stcpDiv"
content = scrape_div(url, div_id)
tracklist = extract_tracklist_begin_num(content)
strip_leading_index(tracklist)
for track in tracklist:
print(track)
|
MP3: Add script to scrape tracklist from URL
I didn't do this in separate commits because I had such a rough time
extracting the div I needed from the page.#!/usr/bin/python3
# sudo apt-get install python3-pip
# pip3 install requests
import lxml.html
import requests
import sys
def validate_url(url):
if not url:
raise SystemError("validate_url() was given an empty URL")
protocol = "http://"
protocol_error_message = ValueError("A URL beginning with " \
"'http://' is required")
if len(url) < len(protocol):
raise protocol_error_message
if url[:len(protocol)] != protocol:
raise protocol_error_message
def scrape_div(url, div_id):
div_id_lookup_string = '//div[contains(@id, "' + div_id + '")]'
try:
html_page = requests.get(url)
except:
e = sys.exc_info()[0]
raise ValueError("Request could not be completed. Perhaps the " \
"URL provided was invalid?")
html_page.raise_for_status()
html_tree = lxml.html.fromstring(html_page.content)
content = html_tree.xpath(div_id_lookup_string)
if len(content) < 1:
raise LookupError("The requested div could not be found")
elif len(content) > 1:
raise LookupError("More than one of the requested divs were found")
return str(content[0].text_content())
# A line is determined to be the name of a track if it begins with a number
def extract_tracklist_begin_num(content):
tracklist = []
for line in content.splitlines():
# Empty line
if not line:
continue
        # Strip leading and trailing whitespace (str methods return a new
        # string rather than modifying in place, so reassign the result)
        line = line.strip()
if line[0].isdigit():
tracklist.append(line)
return tracklist
# Removes leading numbers and whitespace
def strip_leading_index(tracklist):
tracklist_new = []
for track in tracklist:
for i in range(len(track)):
if track[i].isdigit() or track[i] == " ":
i += 1
else:
tracklist_new.append(track[i:])
tracklist[tracklist.index(track)] = track[i:]
break
if len(sys.argv) < 2:
raise RuntimeError("Please provide the URL to the page with "\
"the target tracklist")
url = sys.argv[1] # sys.argv[0] is the name of this script
validate_url(url)
div_id = "stcpDiv"
content = scrape_div(url, div_id)
tracklist = extract_tracklist_begin_num(content)
strip_leading_index(tracklist)
for track in tracklist:
print(track)
|
<commit_before><commit_msg>MP3: Add script to scrape tracklist from URL
I didn't do this in separate commits because I had such a rough time
extracting the div I needed from the page.<commit_after>#!/usr/bin/python3
# sudo apt-get install python3-pip
# pip3 install requests
import lxml.html
import requests
import sys
def validate_url(url):
if not url:
raise SystemError("validate_url() was given an empty URL")
protocol = "http://"
protocol_error_message = ValueError("A URL beginning with " \
"'http://' is required")
if len(url) < len(protocol):
raise protocol_error_message
if url[:len(protocol)] != protocol:
raise protocol_error_message
def scrape_div(url, div_id):
div_id_lookup_string = '//div[contains(@id, "' + div_id + '")]'
try:
html_page = requests.get(url)
except:
e = sys.exc_info()[0]
raise ValueError("Request could not be completed. Perhaps the " \
"URL provided was invalid?")
html_page.raise_for_status()
html_tree = lxml.html.fromstring(html_page.content)
content = html_tree.xpath(div_id_lookup_string)
if len(content) < 1:
raise LookupError("The requested div could not be found")
elif len(content) > 1:
raise LookupError("More than one of the requested divs were found")
return str(content[0].text_content())
# A line is determined to be the name of a track if it begins with a number
def extract_tracklist_begin_num(content):
tracklist = []
for line in content.splitlines():
# Empty line
if not line:
continue
        # Strip leading and trailing whitespace (str methods return a new
        # string rather than modifying in place, so reassign the result)
        line = line.strip()
if line[0].isdigit():
tracklist.append(line)
return tracklist
# Removes leading numbers and whitespace
def strip_leading_index(tracklist):
tracklist_new = []
for track in tracklist:
for i in range(len(track)):
if track[i].isdigit() or track[i] == " ":
i += 1
else:
tracklist_new.append(track[i:])
tracklist[tracklist.index(track)] = track[i:]
break
if len(sys.argv) < 2:
raise RuntimeError("Please provide the URL to the page with "\
"the target tracklist")
url = sys.argv[1] # sys.argv[0] is the name of this script
validate_url(url)
div_id = "stcpDiv"
content = scrape_div(url, div_id)
tracklist = extract_tracklist_begin_num(content)
strip_leading_index(tracklist)
for track in tracklist:
print(track)
|
|
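A quick way to sanity-check the parsing helpers in the scraper above without hitting a live page is to run the same filtering and stripping steps on a hand-written sample. The snippet below is a standalone illustration (the album text is invented); it mirrors what extract_tracklist_begin_num() and strip_leading_index() are meant to do rather than importing the script, since the script has no __main__ guard and would try to read sys.argv on import.

# Standalone mirror of the scraper's two parsing steps; sample text is made up.
sample = """Album notes
01 Intro
02  Main Theme
Some other paragraph
10 Outro
"""

# Keep lines that start with a digit, as extract_tracklist_begin_num() does.
tracklist = [line.strip() for line in sample.splitlines()
             if line.strip() and line.strip()[0].isdigit()]
# Drop the leading track numbers and spaces, as strip_leading_index() does.
cleaned = [track.lstrip("0123456789 ") for track in tracklist]
print(cleaned)  # -> ['Intro', 'Main Theme', 'Outro']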
25fb55ed7d90834d36f0f536f4324facbb5db710
|
examples/play_tvz.py
|
examples/play_tvz.py
|
import sc2
from sc2 import Race
from sc2.player import Human, Bot
from zerg_rush import ZergRushBot
def main():
sc2.run_game(sc2.maps.get("Abyssal Reef LE"), [
Human(Race.Terran),
Bot(Race.Zerg, ZergRushBot())
], realtime=True)
if __name__ == '__main__':
main()
|
Add TvZ Human vs AI example
|
Add TvZ Human vs AI example
|
Python
|
mit
|
Dentosal/python-sc2
|
Add TvZ Human vs AI example
|
import sc2
from sc2 import Race
from sc2.player import Human, Bot
from zerg_rush import ZergRushBot
def main():
sc2.run_game(sc2.maps.get("Abyssal Reef LE"), [
Human(Race.Terran),
Bot(Race.Zerg, ZergRushBot())
], realtime=True)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add TvZ Human vs AI example<commit_after>
|
import sc2
from sc2 import Race
from sc2.player import Human, Bot
from zerg_rush import ZergRushBot
def main():
sc2.run_game(sc2.maps.get("Abyssal Reef LE"), [
Human(Race.Terran),
Bot(Race.Zerg, ZergRushBot())
], realtime=True)
if __name__ == '__main__':
main()
|
Add TvZ Human vs AI exampleimport sc2
from sc2 import Race
from sc2.player import Human, Bot
from zerg_rush import ZergRushBot
def main():
sc2.run_game(sc2.maps.get("Abyssal Reef LE"), [
Human(Race.Terran),
Bot(Race.Zerg, ZergRushBot())
], realtime=True)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add TvZ Human vs AI example<commit_after>import sc2
from sc2 import Race
from sc2.player import Human, Bot
from zerg_rush import ZergRushBot
def main():
sc2.run_game(sc2.maps.get("Abyssal Reef LE"), [
Human(Race.Terran),
Bot(Race.Zerg, ZergRushBot())
], realtime=True)
if __name__ == '__main__':
main()
|
|
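The example above needs a human player in the Terran slot; for unattended testing, a bot-vs-computer variant is sketched below. It assumes the library's Computer player and Difficulty enum (used in python-sc2's other examples) are available, keeps the same map and ZergRushBot, and turns realtime off so the match runs at full speed.

import sc2
from sc2 import Race, Difficulty
from sc2.player import Bot, Computer

from zerg_rush import ZergRushBot


def main():
    # Same map as the human-vs-bot example, but fully automated.
    sc2.run_game(sc2.maps.get("Abyssal Reef LE"), [
        Bot(Race.Zerg, ZergRushBot()),
        Computer(Race.Terran, Difficulty.Medium)
    ], realtime=False)


if __name__ == '__main__':
    main()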
b0b4cb81874f4e7d347baaf292b58dad1af6bbb3
|
perftest.py
|
perftest.py
|
"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
Add a very simple performance testing tool.
|
Add a very simple performance testing tool.
|
Python
|
bsd-3-clause
|
erikdejonge/rabshakeh-couchdb-python-progress-attachments
|
Add a very simple performance testing tool.
|
"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a very simple performance testing tool.<commit_after>
|
"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
Add a very simple performance testing tool."""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a very simple performance testing tool.<commit_after>"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
|
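New benchmarks plug into the harness above by taking a db argument and being added to the tests list in main(). The sketch below is a hypothetical read benchmark, assuming couchdb-python's dict-style document lookup (db[doc_id]); each test runs in a fresh database, so it seeds its own documents first and the timed work therefore covers both the writes and the reads.

def read_doc(db):
    """Read lots of docs, one at a time"""
    # Seed the fresh database created by _run(), then read everything back.
    for i in range(1000):
        db.save({'_id': unicode(i)})
    for i in range(1000):
        doc = db[unicode(i)]
        assert doc['_id'] == unicode(i)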
3c9fc8bbdbbcbca148a3b00c22a0e5fdaa9108aa
|
perftest.py
|
perftest.py
|
"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
Add a very simple performance testing tool.
|
Add a very simple performance testing tool.
|
Python
|
bsd-3-clause
|
gcarranza/couchdb-python,jur9526/couchdb-python
|
Add a very simple performance testing tool.
|
"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a very simple performance testing tool.<commit_after>
|
"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
Add a very simple performance testing tool."""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a very simple performance testing tool.<commit_after>"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
|
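Because main() filters the test list by the names given on the command line, individual benchmarks can be run in isolation (for example "python perftest.py create_bulk_docs"). The tiny standalone snippet below only demonstrates that selection logic with stub functions; the simulated argv value is invented.

def create_doc(db): pass
def create_bulk_docs(db): pass

tests = [create_doc, create_bulk_docs]
argv = ['perftest.py', 'create_bulk_docs']   # simulated sys.argv
selected = [t for t in tests if t.__name__ in argv[1:]]
print([t.__name__ for t in selected])        # -> ['create_bulk_docs']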
216c9e937263838f126474a0728ed9d43797c3e5
|
modify_wpr_delays.py
|
modify_wpr_delays.py
|
#!/usr/bin/env python
# TODO(cs): make cache hit ratio tunable.
# TODO(cs): two analyses: perfect cachability at proxy, and perfect
# cachability in browser. Use CC: private vs. CC: public to make distinction.
# TODO(cs): don't assume that cache is co-located with browser, i.e. insert
# delays between browser and perfect cache.
# TODO(cs): account for performance cost of conditional GETs
# TODO(cs): show a CDF of fraction of response bytes that are cacheable per
# site.
from httparchive import HttpArchive
import glob
import re
import optparse
# Modified archive.
def assume_perfect_cache(archive):
for request in archive:
response = archive[request]
if is_cacheable(response):
# Set all delays to zero:
response.delays = None
response.fix_delays()
def is_cacheable(response):
# We use an array to handle the case where there are redundant headers. The
# most restrictive caching header wins.
cc_headers = []
expires_headers = []
for (name, value) in response.headers:
if re.match("cache-control", name, re.IGNORECASE):
cc_headers.append(value)
if re.match("expires", name, re.IGNORECASE):
expires_headers.append(value)
# N.B. we consider undefined as cacheable.
# WHEN LENGTH(resp_cache_control) = 0
# AND LENGTH(resp_expires) = 0
# THEN "undefined"
if cc_headers == [] and expires_headers == []:
return True
# WHEN resp_cache_control CONTAINS "no-store"
# OR resp_cache_control CONTAINS "no-cache"
# OR resp_cache_control CONTAINS "max-age=0"
# OR resp_expires = "-1"
# THEN "non-cacheable"
for cc_header in cc_headers:
if (re.match("no-store", cc_header, re.IGNORECASE) or
re.match("no-cache", cc_header, re.IGNORECASE) or
re.match("max-age=0", cc_header, re.IGNORECASE)):
return False
for expires_header in expires_headers:
if re.match("-1", expires_header, re.IGNORECASE):
return False
# ELSE "cacheable"
return True
if __name__ == '__main__':
option_parser = optparse.OptionParser(
usage='%prog <directory containing wpr files>')
options, args = option_parser.parse_args()
if len(args) < 1:
print 'args: %s' % args
option_parser.error('Must specify a directory containing wpr files')
for wpr in glob.iglob(args[0] + "/*.wpr"):
archive = HttpArchive.Load(wpr)
assume_perfect_cache(archive)
output_file = re.sub('.wpr$', '.pc.har', wpr)
archive.Persist(output_file)
|
Add a script for finding cacheable responses and setting their delays to 0
|
Add a script for finding cacheable responses and setting their delays to 0
|
Python
|
apache-2.0
|
colin-scott/web-page-replay,colin-scott/web-page-replay
|
Add a script for finding cacheable responses and setting their delays to 0
|
#!/usr/bin/env python
# TODO(cs): make cache hit ratio tunable.
# TODO(cs): two analyses: perfect cachability at proxy, and perfect
# cachability in browser. Use CC: private vs. CC: public to make distinction.
# TODO(cs): don't assume that cache is co-located with browser, i.e. insert
# delays between browser and perfect cache.
# TODO(cs): account for performance cost of conditional GETs
# TODO(cs): show a CDF of fraction of response bytes that are cacheable per
# site.
from httparchive import HttpArchive
import glob
import re
import optparse
# Modified archive.
def assume_perfect_cache(archive):
for request in archive:
response = archive[request]
if is_cacheable(response):
# Set all delays to zero:
response.delays = None
response.fix_delays()
def is_cacheable(response):
# We use an array to handle the case where there are redundant headers. The
# most restrictive caching header wins.
cc_headers = []
expires_headers = []
for (name, value) in response.headers:
if re.match("cache-control", name, re.IGNORECASE):
cc_headers.append(value)
if re.match("expires", name, re.IGNORECASE):
expires_headers.append(value)
# N.B. we consider undefined as cacheable.
# WHEN LENGTH(resp_cache_control) = 0
# AND LENGTH(resp_expires) = 0
# THEN "undefined"
if cc_headers == [] and expires_headers == []:
return True
# WHEN resp_cache_control CONTAINS "no-store"
# OR resp_cache_control CONTAINS "no-cache"
# OR resp_cache_control CONTAINS "max-age=0"
# OR resp_expires = "-1"
# THEN "non-cacheable"
for cc_header in cc_headers:
if (re.match("no-store", cc_header, re.IGNORECASE) or
re.match("no-cache", cc_header, re.IGNORECASE) or
re.match("max-age=0", cc_header, re.IGNORECASE)):
return False
for expires_header in expires_headers:
if re.match("-1", expires_header, re.IGNORECASE):
return False
# ELSE "cacheable"
return True
if __name__ == '__main__':
option_parser = optparse.OptionParser(
usage='%prog <directory containing wpr files>')
options, args = option_parser.parse_args()
if len(args) < 1:
print 'args: %s' % args
option_parser.error('Must specify a directory containing wpr files')
for wpr in glob.iglob(args[0] + "/*.wpr"):
archive = HttpArchive.Load(wpr)
assume_perfect_cache(archive)
output_file = re.sub('.wpr$', '.pc.har', wpr)
archive.Persist(output_file)
|
<commit_before><commit_msg>Add a script for finding cacheable responses and setting their delays to 0<commit_after>
|
#!/usr/bin/env python
# TODO(cs): make cache hit ratio tunable.
# TODO(cs): two analyses: perfect cachability at proxy, and perfect
# cachability in browser. Use CC: private vs. CC: public to make distinction.
# TODO(cs): don't assume that cache is co-located with browser, i.e. insert
# delays between browser and perfect cache.
# TODO(cs): account for performance cost of conditional GETs
# TODO(cs): show a CDF of fraction of response bytes that are cacheable per
# site.
from httparchive import HttpArchive
import glob
import re
import optparse
# Modified archive.
def assume_perfect_cache(archive):
for request in archive:
response = archive[request]
if is_cacheable(response):
# Set all delays to zero:
response.delays = None
response.fix_delays()
def is_cacheable(response):
# We use an array to handle the case where there are redundant headers. The
# most restrictive caching header wins.
cc_headers = []
expires_headers = []
for (name, value) in response.headers:
if re.match("cache-control", name, re.IGNORECASE):
cc_headers.append(value)
if re.match("expires", name, re.IGNORECASE):
expires_headers.append(value)
# N.B. we consider undefined as cacheable.
# WHEN LENGTH(resp_cache_control) = 0
# AND LENGTH(resp_expires) = 0
# THEN "undefined"
if cc_headers == [] and expires_headers == []:
return True
# WHEN resp_cache_control CONTAINS "no-store"
# OR resp_cache_control CONTAINS "no-cache"
# OR resp_cache_control CONTAINS "max-age=0"
# OR resp_expires = "-1"
# THEN "non-cacheable"
for cc_header in cc_headers:
if (re.match("no-store", cc_header, re.IGNORECASE) or
re.match("no-cache", cc_header, re.IGNORECASE) or
re.match("max-age=0", cc_header, re.IGNORECASE)):
return False
for expires_header in expires_headers:
if re.match("-1", expires_header, re.IGNORECASE):
return False
# ELSE "cacheable"
return True
if __name__ == '__main__':
option_parser = optparse.OptionParser(
usage='%prog <directory containing wpr files>')
options, args = option_parser.parse_args()
if len(args) < 1:
print 'args: %s' % args
option_parser.error('Must specify a directory containing wpr files')
for wpr in glob.iglob(args[0] + "/*.wpr"):
archive = HttpArchive.Load(wpr)
assume_perfect_cache(archive)
output_file = re.sub('.wpr$', '.pc.har', wpr)
archive.Persist(output_file)
|
Add a script for finding cacheable responses and setting their delays to 0#!/usr/bin/env python
# TODO(cs): make cache hit ratio tunable.
# TODO(cs): two analyses: perfect cachability at proxy, and perfect
# cachability in browser. Use CC: private vs. CC: public to make distinction.
# TODO(cs): don't assume that cache is co-located with browser, i.e. insert
# delays between browser and perfect cache.
# TODO(cs): account for performance cost of conditional GETs
# TODO(cs): show a CDF of fraction of response bytes that are cacheable per
# site.
from httparchive import HttpArchive
import glob
import re
import optparse
# Modified archive.
def assume_perfect_cache(archive):
for request in archive:
response = archive[request]
if is_cacheable(response):
# Set all delays to zero:
response.delays = None
response.fix_delays()
def is_cacheable(response):
# We use an array to handle the case where there are redundant headers. The
# most restrictive caching header wins.
cc_headers = []
expires_headers = []
for (name, value) in response.headers:
if re.match("cache-control", name, re.IGNORECASE):
cc_headers.append(value)
if re.match("expires", name, re.IGNORECASE):
expires_headers.append(value)
# N.B. we consider undefined as cacheable.
# WHEN LENGTH(resp_cache_control) = 0
# AND LENGTH(resp_expires) = 0
# THEN "undefined"
if cc_headers == [] and expires_headers == []:
return True
# WHEN resp_cache_control CONTAINS "no-store"
# OR resp_cache_control CONTAINS "no-cache"
# OR resp_cache_control CONTAINS "max-age=0"
# OR resp_expires = "-1"
# THEN "non-cacheable"
for cc_header in cc_headers:
if (re.match("no-store", cc_header, re.IGNORECASE) or
re.match("no-cache", cc_header, re.IGNORECASE) or
re.match("max-age=0", cc_header, re.IGNORECASE)):
return False
for expires_header in expires_headers:
if re.match("-1", expires_header, re.IGNORECASE):
return False
# ELSE "cacheable"
return True
if __name__ == '__main__':
option_parser = optparse.OptionParser(
usage='%prog <directory containing wpr files>')
options, args = option_parser.parse_args()
if len(args) < 1:
print 'args: %s' % args
option_parser.error('Must specify a directory containing wpr files')
for wpr in glob.iglob(args[0] + "/*.wpr"):
archive = HttpArchive.Load(wpr)
assume_perfect_cache(archive)
output_file = re.sub('.wpr$', '.pc.har', wpr)
archive.Persist(output_file)
|
<commit_before><commit_msg>Add a script for finding cacheable responses and setting their delays to 0<commit_after>#!/usr/bin/env python
# TODO(cs): make cache hit ratio tunable.
# TODO(cs): two analyses: perfect cachability at proxy, and perfect
# cachability in browser. Use CC: private vs. CC: public to make distinction.
# TODO(cs): don't assume that cache is co-located with browser, i.e. insert
# delays between browser and perfect cache.
# TODO(cs): account for performance cost of conditional GETs
# TODO(cs): show a CDF of fraction of response bytes that are cacheable per
# site.
from httparchive import HttpArchive
import glob
import re
import optparse
# Modified archive.
def assume_perfect_cache(archive):
for request in archive:
response = archive[request]
if is_cacheable(response):
# Set all delays to zero:
response.delays = None
response.fix_delays()
def is_cacheable(response):
# We use an array to handle the case where there are redundant headers. The
# most restrictive caching header wins.
cc_headers = []
expires_headers = []
for (name, value) in response.headers:
if re.match("cache-control", name, re.IGNORECASE):
cc_headers.append(value)
if re.match("expires", name, re.IGNORECASE):
expires_headers.append(value)
# N.B. we consider undefined as cacheable.
# WHEN LENGTH(resp_cache_control) = 0
# AND LENGTH(resp_expires) = 0
# THEN "undefined"
if cc_headers == [] and expires_headers == []:
return True
# WHEN resp_cache_control CONTAINS "no-store"
# OR resp_cache_control CONTAINS "no-cache"
# OR resp_cache_control CONTAINS "max-age=0"
# OR resp_expires = "-1"
# THEN "non-cacheable"
for cc_header in cc_headers:
if (re.match("no-store", cc_header, re.IGNORECASE) or
re.match("no-cache", cc_header, re.IGNORECASE) or
re.match("max-age=0", cc_header, re.IGNORECASE)):
return False
for expires_header in expires_headers:
if re.match("-1", expires_header, re.IGNORECASE):
return False
# ELSE "cacheable"
return True
if __name__ == '__main__':
option_parser = optparse.OptionParser(
usage='%prog <directory containing wpr files>')
options, args = option_parser.parse_args()
if len(args) < 1:
print 'args: %s' % args
option_parser.error('Must specify a directory containing wpr files')
for wpr in glob.iglob(args[0] + "/*.wpr"):
archive = HttpArchive.Load(wpr)
assume_perfect_cache(archive)
output_file = re.sub('.wpr$', '.pc.har', wpr)
archive.Persist(output_file)
|
|
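is_cacheable() only looks at a .headers list of (name, value) pairs, so its classification rules can be spot-checked without a real archive. The stand-in response type below is purely illustrative; it assumes is_cacheable is importable from the script (or that these lines are appended under its __main__ guard).

from collections import namedtuple
# from modify_wpr_delays import is_cacheable   # if run as a separate file

# Minimal stand-in exposing only the attribute is_cacheable() reads.
FakeResponse = namedtuple('FakeResponse', ['headers'])

print(is_cacheable(FakeResponse(headers=[])))                                   # True: no caching headers
print(is_cacheable(FakeResponse(headers=[('Cache-Control', 'no-store')])))      # False
print(is_cacheable(FakeResponse(headers=[('Expires', '-1')])))                  # False
print(is_cacheable(FakeResponse(headers=[('Cache-Control', 'max-age=3600')])))  # True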
7fea10691b8f8d15e31c30949d15a69e20ee4214
|
scripts/identify-unpinned.py
|
scripts/identify-unpinned.py
|
import yaml
import glob
for file in glob.glob("*.yaml"):
print("Processing %s" % file)
with open(file, 'r') as handle:
w = yaml.load(handle)
for tool in w['tools']:
print(tool)
if 'changeset_revision' not in tool:
print(tool)
for file in glob.glob("*.yml"):
print("Processing %s" % file)
with open(file, 'r') as handle:
w = yaml.load(handle)
for tool in w['tools']:
print(tool)
if 'changeset_revision' not in tool:
print(tool)
|
Add script to identify unpinned repos
|
Add script to identify unpinned repos
|
Python
|
mit
|
usegalaxy-eu/usegalaxy-eu-tools,usegalaxy-eu/usegalaxy-eu-tools
|
Add script to identify unpinned repos
|
import yaml
import glob
for file in glob.glob("*.yaml"):
print("Processing %s" % file)
with open(file, 'r') as handle:
w = yaml.load(handle)
for tool in w['tools']:
print(tool)
if 'changeset_revision' not in tool:
print(tool)
for file in glob.glob("*.yml"):
print("Processing %s" % file)
with open(file, 'r') as handle:
w = yaml.load(handle)
for tool in w['tools']:
print(tool)
if 'changeset_revision' not in tool:
print(tool)
|
<commit_before><commit_msg>Add script to identify unpinned repos<commit_after>
|
import yaml
import glob
for file in glob.glob("*.yaml"):
print("Processing %s" % file)
with open(file, 'r') as handle:
w = yaml.load(handle)
for tool in w['tools']:
print(tool)
if 'changeset_revision' not in tool:
print(tool)
for file in glob.glob("*.yml"):
print("Processing %s" % file)
with open(file, 'r') as handle:
w = yaml.load(handle)
for tool in w['tools']:
print(tool)
if 'changeset_revision' not in tool:
print(tool)
|
Add script to identify unpinned reposimport yaml
import glob
for file in glob.glob("*.yaml"):
print("Processing %s" % file)
with open(file, 'r') as handle:
w = yaml.load(handle)
for tool in w['tools']:
print(tool)
if 'changeset_revision' not in tool:
print(tool)
for file in glob.glob("*.yml"):
print("Processing %s" % file)
with open(file, 'r') as handle:
w = yaml.load(handle)
for tool in w['tools']:
print(tool)
if 'changeset_revision' not in tool:
print(tool)
|
<commit_before><commit_msg>Add script to identify unpinned repos<commit_after>import yaml
import glob
for file in glob.glob("*.yaml"):
print("Processing %s" % file)
with open(file, 'r') as handle:
w = yaml.load(handle)
for tool in w['tools']:
print(tool)
if 'changeset_revision' not in tool:
print(tool)
for file in glob.glob("*.yml"):
print("Processing %s" % file)
with open(file, 'r') as handle:
w = yaml.load(handle)
for tool in w['tools']:
print(tool)
if 'changeset_revision' not in tool:
print(tool)
|
|
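The check in the script is simply whether a tool entry carries a changeset_revision key. The snippet below runs the same test on an inline YAML document so the expected behaviour is visible without a tools repository; the tool names and revision are invented, and safe_load is used here only to keep the demo free of loader warnings.

import yaml

sample = """
tools:
  - name: fastqc
    owner: devteam
    changeset_revision: "1e2f7f9d43a4"
  - name: hisat2
    owner: iuc
"""

w = yaml.safe_load(sample)
for tool in w['tools']:
    if 'changeset_revision' not in tool:
        print(tool)   # only the unpinned hisat2 entry is printed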
cd27adc357655e9cd25c5d23a171920addd7c8f5
|
jupyter_notebook_config.py
|
jupyter_notebook_config.py
|
# Based off of https://github.com/jupyter/notebook/blob/master/docs/source/extending/savehooks.rst
import io
import os
from notebook.utils import to_api_path
_script_exporter = None
_html_exporter = None
def script_post_save(model, os_path, contents_manager, **kwargs):
"""convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script`
"""
from nbconvert.exporters.script import ScriptExporter
if model['type'] != 'notebook':
return
global _script_exporter
if _script_exporter is None:
_script_exporter = ScriptExporter(parent=contents_manager)
log = contents_manager.log
# save .py file
base, ext = os.path.splitext(os_path)
script, resources = _script_exporter.from_filename(os_path)
script_fname = base + resources.get('output_extension', '.txt')
log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
with io.open(script_fname, 'w', encoding='utf-8') as f:
f.write(script)
c.FileContentsManager.post_save_hook = script_post_save
|
Add script to automatically save py file in jupyter.
|
Add script to automatically save py file in jupyter.
|
Python
|
bsd-3-clause
|
daichi-yoshikawa/dnn
|
Add script to automatically save py file in jupyter.
|
# Based off of https://github.com/jupyter/notebook/blob/master/docs/source/extending/savehooks.rst
import io
import os
from notebook.utils import to_api_path
_script_exporter = None
_html_exporter = None
def script_post_save(model, os_path, contents_manager, **kwargs):
"""convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script`
"""
from nbconvert.exporters.script import ScriptExporter
if model['type'] != 'notebook':
return
global _script_exporter
if _script_exporter is None:
_script_exporter = ScriptExporter(parent=contents_manager)
log = contents_manager.log
# save .py file
base, ext = os.path.splitext(os_path)
script, resources = _script_exporter.from_filename(os_path)
script_fname = base + resources.get('output_extension', '.txt')
log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
with io.open(script_fname, 'w', encoding='utf-8') as f:
f.write(script)
c.FileContentsManager.post_save_hook = script_post_save
|
<commit_before><commit_msg>Add script to automatically save py file in jupyter.<commit_after>
|
# Based off of https://github.com/jupyter/notebook/blob/master/docs/source/extending/savehooks.rst
import io
import os
from notebook.utils import to_api_path
_script_exporter = None
_html_exporter = None
def script_post_save(model, os_path, contents_manager, **kwargs):
"""convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script`
"""
from nbconvert.exporters.script import ScriptExporter
if model['type'] != 'notebook':
return
global _script_exporter
if _script_exporter is None:
_script_exporter = ScriptExporter(parent=contents_manager)
log = contents_manager.log
# save .py file
base, ext = os.path.splitext(os_path)
script, resources = _script_exporter.from_filename(os_path)
script_fname = base + resources.get('output_extension', '.txt')
log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
with io.open(script_fname, 'w', encoding='utf-8') as f:
f.write(script)
c.FileContentsManager.post_save_hook = script_post_save
|
Add script to automatically save py file in jupyter.# Based off of https://github.com/jupyter/notebook/blob/master/docs/source/extending/savehooks.rst
import io
import os
from notebook.utils import to_api_path
_script_exporter = None
_html_exporter = None
def script_post_save(model, os_path, contents_manager, **kwargs):
"""convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script`
"""
from nbconvert.exporters.script import ScriptExporter
if model['type'] != 'notebook':
return
global _script_exporter
if _script_exporter is None:
_script_exporter = ScriptExporter(parent=contents_manager)
log = contents_manager.log
# save .py file
base, ext = os.path.splitext(os_path)
script, resources = _script_exporter.from_filename(os_path)
script_fname = base + resources.get('output_extension', '.txt')
log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
with io.open(script_fname, 'w', encoding='utf-8') as f:
f.write(script)
c.FileContentsManager.post_save_hook = script_post_save
|
<commit_before><commit_msg>Add script to automatically save py file in jupyter.<commit_after># Based off of https://github.com/jupyter/notebook/blob/master/docs/source/extending/savehooks.rst
import io
import os
from notebook.utils import to_api_path
_script_exporter = None
_html_exporter = None
def script_post_save(model, os_path, contents_manager, **kwargs):
"""convert notebooks to Python script after save with nbconvert
replaces `ipython notebook --script`
"""
from nbconvert.exporters.script import ScriptExporter
if model['type'] != 'notebook':
return
global _script_exporter
if _script_exporter is None:
_script_exporter = ScriptExporter(parent=contents_manager)
log = contents_manager.log
# save .py file
base, ext = os.path.splitext(os_path)
script, resources = _script_exporter.from_filename(os_path)
script_fname = base + resources.get('output_extension', '.txt')
log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir))
with io.open(script_fname, 'w', encoding='utf-8') as f:
f.write(script)
c.FileContentsManager.post_save_hook = script_post_save
|
|
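The unused _html_exporter placeholder above suggests an HTML export was planned alongside the script export. A sketch of that second hook is below, written for the same jupyter_notebook_config.py (it reuses its io/os imports, the _html_exporter global and script_post_save); nbconvert's HTMLExporter is a standard exporter, but this exact wiring is an untested assumption.

def html_post_save(model, os_path, contents_manager, **kwargs):
    """Also write an .html rendering of the notebook after each save (sketch)."""
    from nbconvert.exporters.html import HTMLExporter

    if model['type'] != 'notebook':
        return

    global _html_exporter
    if _html_exporter is None:
        _html_exporter = HTMLExporter(parent=contents_manager)

    base, ext = os.path.splitext(os_path)
    html, resources = _html_exporter.from_filename(os_path)
    html_fname = base + resources.get('output_extension', '.html')
    with io.open(html_fname, 'w', encoding='utf-8') as f:
        f.write(html)


def combined_post_save(model, os_path, contents_manager, **kwargs):
    # post_save_hook takes a single callable, so chain both exports here.
    script_post_save(model, os_path, contents_manager, **kwargs)
    html_post_save(model, os_path, contents_manager, **kwargs)


c.FileContentsManager.post_save_hook = combined_post_save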
08e7dd7f9e1ee8d9ec57f2ec7b6a68caee1c8f0d
|
py/number-of-boomerangs.py
|
py/number-of-boomerangs.py
|
from collections import Counter
class Solution(object):
def numberOfBoomerangs(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
cs = [Counter() for _ in xrange(len(points))]
for i1, p1 in enumerate(points):
for i2 in range(i1 + 1, len(points)):
p2 = points[i2]
d = (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2
cs[i1][d] += 1
cs[i2][d] += 1
ans = 0
for c in cs:
for count in c.values():
ans += count * (count - 1)
return ans
|
Add py solution for 447. Number of Boomerangs
|
Add py solution for 447. Number of Boomerangs
447. Number of Boomerangs: https://leetcode.com/problems/number-of-boomerangs/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 447. Number of Boomerangs
447. Number of Boomerangs: https://leetcode.com/problems/number-of-boomerangs/
|
from collections import Counter
class Solution(object):
def numberOfBoomerangs(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
cs = [Counter() for _ in xrange(len(points))]
for i1, p1 in enumerate(points):
for i2 in range(i1 + 1, len(points)):
p2 = points[i2]
d = (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2
cs[i1][d] += 1
cs[i2][d] += 1
ans = 0
for c in cs:
for count in c.values():
ans += count * (count - 1)
return ans
|
<commit_before><commit_msg>Add py solution for 447. Number of Boomerangs
447. Number of Boomerangs: https://leetcode.com/problems/number-of-boomerangs/<commit_after>
|
from collections import Counter
class Solution(object):
def numberOfBoomerangs(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
cs = [Counter() for _ in xrange(len(points))]
for i1, p1 in enumerate(points):
for i2 in range(i1 + 1, len(points)):
p2 = points[i2]
d = (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2
cs[i1][d] += 1
cs[i2][d] += 1
ans = 0
for c in cs:
for count in c.values():
ans += count * (count - 1)
return ans
|
Add py solution for 447. Number of Boomerangs
447. Number of Boomerangs: https://leetcode.com/problems/number-of-boomerangs/from collections import Counter
class Solution(object):
def numberOfBoomerangs(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
cs = [Counter() for _ in xrange(len(points))]
for i1, p1 in enumerate(points):
for i2 in range(i1 + 1, len(points)):
p2 = points[i2]
d = (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2
cs[i1][d] += 1
cs[i2][d] += 1
ans = 0
for c in cs:
for count in c.values():
ans += count * (count - 1)
return ans
|
<commit_before><commit_msg>Add py solution for 447. Number of Boomerangs
447. Number of Boomerangs: https://leetcode.com/problems/number-of-boomerangs/<commit_after>from collections import Counter
class Solution(object):
def numberOfBoomerangs(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
cs = [Counter() for _ in xrange(len(points))]
for i1, p1 in enumerate(points):
for i2 in range(i1 + 1, len(points)):
p2 = points[i2]
d = (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2
cs[i1][d] += 1
cs[i2][d] += 1
ans = 0
for c in cs:
for count in c.values():
ans += count * (count - 1)
return ans
|
|
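The count * (count - 1) term counts ordered pairs: if k points lie at the same distance from a given anchor point, they can fill the two outer slots of a boomerang in k * (k - 1) ways. A quick check against the problem's sample input, run in the same Python 2 module as the class (it uses xrange):

# Three collinear points: only the middle one has two equidistant neighbours,
# giving 2 * (2 - 1) = 2 boomerangs.
s = Solution()
print(s.numberOfBoomerangs([[0, 0], [1, 0], [2, 0]]))  # expected: 2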
1abdf4b669f9d5329c8e0895956af2b0ebf2bfdd
|
housemarket/housesales/migrations/0001_initial.py
|
housemarket/housesales/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='HouseSales',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('transaction_id', models.CharField(max_length=40)),
('price', models.DecimalField(max_digits=12, decimal_places=2)),
('date_of_transfer', models.DateField()),
('postcode', models.CharField(max_length=10)),
('property_type', models.CharField(max_length=1)),
('old_new', models.CharField(max_length=1)),
('duration', models.CharField(max_length=1)),
('paon', models.CharField(max_length=250)),
('saon', models.CharField(max_length=250)),
('street', models.CharField(max_length=250)),
('locality', models.CharField(max_length=250)),
('town_city', models.CharField(max_length=250)),
('district', models.CharField(max_length=250)),
('county', models.CharField(max_length=250)),
('status', models.CharField(max_length=1)),
],
),
]
|
Add initial db migration for HouseSales
|
Add initial db migration for HouseSales
|
Python
|
mit
|
andreagrandi/sold-house-prices
|
Add initial db migration for HouseSales
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='HouseSales',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('transaction_id', models.CharField(max_length=40)),
('price', models.DecimalField(max_digits=12, decimal_places=2)),
('date_of_transfer', models.DateField()),
('postcode', models.CharField(max_length=10)),
('property_type', models.CharField(max_length=1)),
('old_new', models.CharField(max_length=1)),
('duration', models.CharField(max_length=1)),
('paon', models.CharField(max_length=250)),
('saon', models.CharField(max_length=250)),
('street', models.CharField(max_length=250)),
('locality', models.CharField(max_length=250)),
('town_city', models.CharField(max_length=250)),
('district', models.CharField(max_length=250)),
('county', models.CharField(max_length=250)),
('status', models.CharField(max_length=1)),
],
),
]
|
<commit_before><commit_msg>Add initial db migration for HouseSales<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='HouseSales',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('transaction_id', models.CharField(max_length=40)),
('price', models.DecimalField(max_digits=12, decimal_places=2)),
('date_of_transfer', models.DateField()),
('postcode', models.CharField(max_length=10)),
('property_type', models.CharField(max_length=1)),
('old_new', models.CharField(max_length=1)),
('duration', models.CharField(max_length=1)),
('paon', models.CharField(max_length=250)),
('saon', models.CharField(max_length=250)),
('street', models.CharField(max_length=250)),
('locality', models.CharField(max_length=250)),
('town_city', models.CharField(max_length=250)),
('district', models.CharField(max_length=250)),
('county', models.CharField(max_length=250)),
('status', models.CharField(max_length=1)),
],
),
]
|
Add initial db migration for HouseSales# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='HouseSales',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('transaction_id', models.CharField(max_length=40)),
('price', models.DecimalField(max_digits=12, decimal_places=2)),
('date_of_transfer', models.DateField()),
('postcode', models.CharField(max_length=10)),
('property_type', models.CharField(max_length=1)),
('old_new', models.CharField(max_length=1)),
('duration', models.CharField(max_length=1)),
('paon', models.CharField(max_length=250)),
('saon', models.CharField(max_length=250)),
('street', models.CharField(max_length=250)),
('locality', models.CharField(max_length=250)),
('town_city', models.CharField(max_length=250)),
('district', models.CharField(max_length=250)),
('county', models.CharField(max_length=250)),
('status', models.CharField(max_length=1)),
],
),
]
|
<commit_before><commit_msg>Add initial db migration for HouseSales<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='HouseSales',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('transaction_id', models.CharField(max_length=40)),
('price', models.DecimalField(max_digits=12, decimal_places=2)),
('date_of_transfer', models.DateField()),
('postcode', models.CharField(max_length=10)),
('property_type', models.CharField(max_length=1)),
('old_new', models.CharField(max_length=1)),
('duration', models.CharField(max_length=1)),
('paon', models.CharField(max_length=250)),
('saon', models.CharField(max_length=250)),
('street', models.CharField(max_length=250)),
('locality', models.CharField(max_length=250)),
('town_city', models.CharField(max_length=250)),
('district', models.CharField(max_length=250)),
('county', models.CharField(max_length=250)),
('status', models.CharField(max_length=1)),
],
),
]
|
|
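For context, the model this initial migration corresponds to would look roughly like the sketch below. Field names, types and lengths are read directly off the migration; placing the class in housesales/models.py follows the usual Django convention and is an assumption here.

from django.db import models


class HouseSales(models.Model):
    # Mirrors the columns created by migration 0001_initial above.
    transaction_id = models.CharField(max_length=40)
    price = models.DecimalField(max_digits=12, decimal_places=2)
    date_of_transfer = models.DateField()
    postcode = models.CharField(max_length=10)
    property_type = models.CharField(max_length=1)
    old_new = models.CharField(max_length=1)
    duration = models.CharField(max_length=1)
    paon = models.CharField(max_length=250)
    saon = models.CharField(max_length=250)
    street = models.CharField(max_length=250)
    locality = models.CharField(max_length=250)
    town_city = models.CharField(max_length=250)
    district = models.CharField(max_length=250)
    county = models.CharField(max_length=250)
    status = models.CharField(max_length=1)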
645cbafc95f69d9d8d70ce3438d3907fed1024a2
|
pyVers/neuralnet.py
|
pyVers/neuralnet.py
|
import numpy as np
class NeuralNEt(object):
def __init__(self, inputs, outputs, *hiddens):
self.shape = [inputs] + list(hiddens) + [outputs]
self.layers = len(self.shape)
self.weights = self.__randInitialize()
def __randInitialize(self):
"""Randomly initializes the weight matrices."""
weights = []
for i in xrange(self.layers - 1):
Nin = self.shape[i]
Nout = self.shape[i+1]
eps = np.sqrt(6)/np.sqrt(Nin + Nout)
weights.append( randMatrix((Nout, Nin + 1), eps) )
return weights
def __unflatten(self, flat):
"""used by the cost function to unflatten weight matrices."""
matrices = []
start = 0
for i in xrange(self.layers - 1):
Nin = self.shape[i] +1
Nout = self.shape[i+1]
end = Nout * Nin + start
arr = flat[start:end].reshape( (Nout, Nin) )
matrices.append(arr)
start = end
return matrices
def randMatrix(size, eps):
"""Returns random matrix with shape = size whose values range in [-eps,eps]."""
return 2*eps*np.random.random_sample(size) - eps
def sigmoid(z):
"""Returns the sigmoid function evaluated on z (z can be any numpy array or scalar)."""
return 1/(1 + np.exp(-z))
if __name__ == "__main__":
nn = NeuralNEt(9, 9, 5, 6)
w = nn.weights
flat = np.append(w[0].flatten(), w[1].flatten())
flat = np.append(flat, w[2].flatten())
    recw = nn._NeuralNEt__unflatten(flat)  # __unflatten is name-mangled by its double underscore
print w[0] - recw[0]
print w[1] - recw[1]
print w[2] - recw[2]
|
Work begun on neural network class.
|
Work begun on neural network class.
|
Python
|
mit
|
m0baxter/tic-tac-toe-AI
|
Work begun on neural network class.
|
import numpy as np
class NeuralNEt(object):
def __init__(self, inputs, outputs, *hiddens):
self.shape = [inputs] + list(hiddens) + [outputs]
self.layers = len(self.shape)
self.weights = self.__randInitialize()
def __randInitialize(self):
"""Randomly initializes the weight matrices."""
weights = []
for i in xrange(self.layers - 1):
Nin = self.shape[i]
Nout = self.shape[i+1]
eps = np.sqrt(6)/np.sqrt(Nin + Nout)
weights.append( randMatrix((Nout, Nin + 1), eps) )
return weights
def __unflatten(self, flat):
"""used by the cost function to unflatten weight matrices."""
matrices = []
start = 0
for i in xrange(self.layers - 1):
Nin = self.shape[i] +1
Nout = self.shape[i+1]
end = Nout * Nin + start
arr = flat[start:end].reshape( (Nout, Nin) )
matrices.append(arr)
start = end
return matrices
def randMatrix(size, eps):
"""Returns random matrix with shape = size whose values range in [-eps,eps]."""
return 2*eps*np.random.random_sample(size) - eps
def sigmoid(z):
"""Returns the sigmoid function evaluated on z (z can be any numpy array or scalar)."""
return 1/(1 + np.exp(-z))
if __name__ == "__main__":
nn = NeuralNEt(9, 9, 5, 6)
w = nn.weights
flat = np.append(w[0].flatten(), w[1].flatten())
flat = np.append(flat, w[2].flatten())
    recw = nn._NeuralNEt__unflatten(flat)  # __unflatten is name-mangled by its double underscore
print w[0] - recw[0]
print w[1] - recw[1]
print w[2] - recw[2]
|
<commit_before><commit_msg>Work begun on neural network class.<commit_after>
|
import numpy as np
class NeuralNEt(object):
def __init__(self, inputs, outputs, *hiddens):
self.shape = [inputs] + list(hiddens) + [outputs]
self.layers = len(self.shape)
self.weights = self.__randInitialize()
def __randInitialize(self):
"""Randomly initializes the weight matrices."""
weights = []
for i in xrange(self.layers - 1):
Nin = self.shape[i]
Nout = self.shape[i+1]
eps = np.sqrt(6)/np.sqrt(Nin + Nout)
weights.append( randMatrix((Nout, Nin + 1), eps) )
return weights
def __unflatten(self, flat):
"""used by the cost function to unflatten weight matrices."""
matrices = []
start = 0
for i in xrange(self.layers - 1):
Nin = self.shape[i] +1
Nout = self.shape[i+1]
end = Nout * Nin + start
arr = flat[start:end].reshape( (Nout, Nin) )
matrices.append(arr)
start = end
return matrices
def randMatrix(size, eps):
"""Returns random matrix with shape = size whose values range in [-eps,eps]."""
return 2*eps*np.random.random_sample(size) - eps
def sigmoid(z):
"""Returns the sigmoid function evaluated on z (z can be any numpy array or scalar)."""
return 1/(1 + np.exp(-z))
if __name__ == "__main__":
nn = NeuralNEt(9, 9, 5, 6)
w = nn.weights
flat = np.append(w[0].flatten(), w[1].flatten())
flat = np.append(flat, w[2].flatten())
    recw = nn._NeuralNEt__unflatten(flat)  # __unflatten is name-mangled by its double underscore
print w[0] - recw[0]
print w[1] - recw[1]
print w[2] - recw[2]
|
Work begun on neural network class.
import numpy as np
class NeuralNEt(object):
def __init__(self, inputs, outputs, *hiddens):
self.shape = [inputs] + list(hiddens) + [outputs]
self.layers = len(self.shape)
self.weights = self.__randInitialize()
def __randInitialize(self):
"""Randomly initializes the weight matrices."""
weights = []
for i in xrange(self.layers - 1):
Nin = self.shape[i]
Nout = self.shape[i+1]
eps = np.sqrt(6)/np.sqrt(Nin + Nout)
weights.append( randMatrix((Nout, Nin + 1), eps) )
return weights
def __unflatten(self, flat):
"""used by the cost function to unflatten weight matrices."""
matrices = []
start = 0
for i in xrange(self.layers - 1):
Nin = self.shape[i] +1
Nout = self.shape[i+1]
end = Nout * Nin + start
arr = flat[start:end].reshape( (Nout, Nin) )
matrices.append(arr)
start = end
return matrices
def randMatrix(size, eps):
"""Returns random matrix with shape = size whose values range in [-eps,eps]."""
return 2*eps*np.random.random_sample(size) - eps
def sigmoid(z):
"""Returns the sigmoid function evaluated on z (z can be any numpy array or scalar)."""
return 1/(1 + np.exp(-z))
if __name__ == "__main__":
nn = NeuralNEt(9, 9, 5, 6)
w = nn.weights
flat = np.append(w[0].flatten(), w[1].flatten())
flat = np.append(flat, w[2].flatten())
    recw = nn._NeuralNEt__unflatten(flat)  # __unflatten is name-mangled by its double underscore
print w[0] - recw[0]
print w[1] - recw[1]
print w[2] - recw[2]
|
<commit_before><commit_msg>Work begun on neural network class.<commit_after>
import numpy as np
class NeuralNEt(object):
def __init__(self, inputs, outputs, *hiddens):
self.shape = [inputs] + list(hiddens) + [outputs]
self.layers = len(self.shape)
self.weights = self.__randInitialize()
def __randInitialize(self):
"""Randomly initializes the weight matrices."""
weights = []
for i in xrange(self.layers - 1):
Nin = self.shape[i]
Nout = self.shape[i+1]
eps = np.sqrt(6)/np.sqrt(Nin + Nout)
weights.append( randMatrix((Nout, Nin + 1), eps) )
return weights
def __unflatten(self, flat):
"""used by the cost function to unflatten weight matrices."""
matrices = []
start = 0
for i in xrange(self.layers - 1):
Nin = self.shape[i] +1
Nout = self.shape[i+1]
end = Nout * Nin + start
arr = flat[start:end].reshape( (Nout, Nin) )
matrices.append(arr)
start = end
return matrices
def randMatrix(size, eps):
"""Returns random matrix with shape = size whose values range in [-eps,eps]."""
return 2*eps*np.random.random_sample(size) - eps
def sigmoid(z):
"""Returns the sigmoid function evaluated on z (z can be any numpy array or scalar)."""
return 1/(1 + np.exp(-z))
if __name__ == "__main__":
nn = NeuralNEt(9, 9, 5, 6)
w = nn.weights
flat = np.append(w[0].flatten(), w[1].flatten())
flat = np.append(flat, w[2].flatten())
    recw = nn._NeuralNEt__unflatten(flat)  # __unflatten is name-mangled by its double underscore
print w[0] - recw[0]
print w[1] - recw[1]
print w[2] - recw[2]
|
|
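The __main__ block above is essentially a round-trip test of flattening and unflattening the weight matrices. A slightly tidier version of that check is sketched below; because __unflatten is name-mangled by its leading double underscore, it has to be reached as _NeuralNEt__unflatten from outside the class. This assumes Python 2, matching the xrange usage in the class.

import numpy as np

nn = NeuralNEt(9, 9, 5, 6)
flat = np.concatenate([w.flatten() for w in nn.weights])
recovered = nn._NeuralNEt__unflatten(flat)
# Every reconstructed matrix should equal the original exactly.
assert all(np.array_equal(a, b) for a, b in zip(nn.weights, recovered))
print("round-trip OK")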
453174d7acb9156fda29e4192691ef2d14eda2bb
|
bookmarks/bookmarks/migrations/0005_rename_app.py
|
bookmarks/bookmarks/migrations/0005_rename_app.py
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bookmarks', '0004_auto_20160901_2322'),
]
operations = [
migrations.RunSQL("DROP TABLE bookmarks_bookmark;"),
migrations.RunSQL("ALTER TABLE core_bookmark RENAME TO bookmarks_bookmark;"),
migrations.RunSQL("UPDATE django_content_type SET app_label='bookmarks' WHERE app_label='core';"),
]
|
Add one-way migration from core to bookmarks
|
Add one-way migration from core to bookmarks
|
Python
|
mit
|
tom-henderson/bookmarks,tom-henderson/bookmarks,tom-henderson/bookmarks
|
Add one-way migration from core to bookmarks
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bookmarks', '0004_auto_20160901_2322'),
]
operations = [
migrations.RunSQL("DROP TABLE bookmarks_bookmark;"),
migrations.RunSQL("ALTER TABLE core_bookmark RENAME TO bookmarks_bookmark;"),
migrations.RunSQL("UPDATE django_content_type SET app_label='bookmarks' WHERE app_label='core';"),
]
|
<commit_before><commit_msg>Add one-way migration from core to bookmarks<commit_after>
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bookmarks', '0004_auto_20160901_2322'),
]
operations = [
migrations.RunSQL("DROP TABLE bookmarks_bookmark;"),
migrations.RunSQL("ALTER TABLE core_bookmark RENAME TO bookmarks_bookmark;"),
migrations.RunSQL("UPDATE django_content_type SET app_label='bookmarks' WHERE app_label='core';"),
]
|
Add one-way migration from core to bookmarksfrom django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bookmarks', '0004_auto_20160901_2322'),
]
operations = [
migrations.RunSQL("DROP TABLE bookmarks_bookmark;"),
migrations.RunSQL("ALTER TABLE core_bookmark RENAME TO bookmarks_bookmark;"),
migrations.RunSQL("UPDATE django_content_type SET app_label='bookmarks' WHERE app_label='core';"),
]
|
<commit_before><commit_msg>Add one-way migration from core to bookmarks<commit_after>from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bookmarks', '0004_auto_20160901_2322'),
]
operations = [
migrations.RunSQL("DROP TABLE bookmarks_bookmark;"),
migrations.RunSQL("ALTER TABLE core_bookmark RENAME TO bookmarks_bookmark;"),
migrations.RunSQL("UPDATE django_content_type SET app_label='bookmarks' WHERE app_label='core';"),
]
|
|
206d1d2eb2b1ead51621670dd3341baeb21ae271
|
CodeFights/spiralNumbers.py
|
CodeFights/spiralNumbers.py
|
#!/usr/local/bin/python
# Code Fights Spiral Numbers Problem
def spiralNumbers(n):
r, c = 0, 0 # Starting location
# Delta for row or column increments: first direction is left to right
dr, dc = 0, 1
spiral = [[0] * n for _ in range(n)]
for i in range(1, n * n + 1):
spiral[r][c] = i
testr, testc = r + dr, c + dc
if 0 <= testr < n and 0 <= testc < n and spiral[testr][testc] == 0:
r, c = testr, testc
else:
dr, dc = dc, -dr
r, c = r + dr, c + dc
return spiral
def main():
tests = [
[
3,
[[1, 2, 3], [8, 9, 4], [7, 6, 5]]
],
[
5,
[[1, 2, 3, 4, 5],
[16, 17, 18, 19, 6],
[15, 24, 25, 20, 7],
[14, 23, 22, 21, 8],
[13, 12, 11, 10, 9]]
],
[
6,
[[1, 2, 3, 4, 5, 6],
[20, 21, 22, 23, 24, 7],
[19, 32, 33, 34, 25, 8],
[18, 31, 36, 35, 26, 9],
[17, 30, 29, 28, 27, 10],
[16, 15, 14, 13, 12, 11]]
]
]
for t in tests:
res = spiralNumbers(t[0])
ans = t[1]
if ans == res:
print("PASSED: spiralNumbers({}) returned {}"
.format(t[0], res))
else:
print("FAILED: spiralNumbers({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights spiral numbers problem
|
Solve Code Fights spiral numbers problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights spiral numbers problem
|
#!/usr/local/bin/python
# Code Fights Spiral Numbers Problem
def spiralNumbers(n):
r, c = 0, 0 # Starting location
# Delta for row or column increments: first direction is left to right
dr, dc = 0, 1
spiral = [[0] * n for _ in range(n)]
for i in range(1, n * n + 1):
spiral[r][c] = i
testr, testc = r + dr, c + dc
if 0 <= testr < n and 0 <= testc < n and spiral[testr][testc] == 0:
r, c = testr, testc
else:
dr, dc = dc, -dr
r, c = r + dr, c + dc
return spiral
def main():
tests = [
[
3,
[[1, 2, 3], [8, 9, 4], [7, 6, 5]]
],
[
5,
[[1, 2, 3, 4, 5],
[16, 17, 18, 19, 6],
[15, 24, 25, 20, 7],
[14, 23, 22, 21, 8],
[13, 12, 11, 10, 9]]
],
[
6,
[[1, 2, 3, 4, 5, 6],
[20, 21, 22, 23, 24, 7],
[19, 32, 33, 34, 25, 8],
[18, 31, 36, 35, 26, 9],
[17, 30, 29, 28, 27, 10],
[16, 15, 14, 13, 12, 11]]
]
]
for t in tests:
res = spiralNumbers(t[0])
ans = t[1]
if ans == res:
print("PASSED: spiralNumbers({}) returned {}"
.format(t[0], res))
else:
print("FAILED: spiralNumbers({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights spiral numbers problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Spiral Numbers Problem
def spiralNumbers(n):
r, c = 0, 0 # Starting location
# Delta for row or column increments: first direction is left to right
dr, dc = 0, 1
spiral = [[0] * n for _ in range(n)]
for i in range(1, n * n + 1):
spiral[r][c] = i
testr, testc = r + dr, c + dc
if 0 <= testr < n and 0 <= testc < n and spiral[testr][testc] == 0:
r, c = testr, testc
else:
dr, dc = dc, -dr
r, c = r + dr, c + dc
return spiral
def main():
tests = [
[
3,
[[1, 2, 3], [8, 9, 4], [7, 6, 5]]
],
[
5,
[[1, 2, 3, 4, 5],
[16, 17, 18, 19, 6],
[15, 24, 25, 20, 7],
[14, 23, 22, 21, 8],
[13, 12, 11, 10, 9]]
],
[
6,
[[1, 2, 3, 4, 5, 6],
[20, 21, 22, 23, 24, 7],
[19, 32, 33, 34, 25, 8],
[18, 31, 36, 35, 26, 9],
[17, 30, 29, 28, 27, 10],
[16, 15, 14, 13, 12, 11]]
]
]
for t in tests:
res = spiralNumbers(t[0])
ans = t[1]
if ans == res:
print("PASSED: spiralNumbers({}) returned {}"
.format(t[0], res))
else:
print("FAILED: spiralNumbers({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights spiral numbers problem#!/usr/local/bin/python
# Code Fights Spiral Numbers Problem
def spiralNumbers(n):
r, c = 0, 0 # Starting location
# Delta for row or column increments: first direction is left to right
dr, dc = 0, 1
spiral = [[0] * n for _ in range(n)]
for i in range(1, n * n + 1):
spiral[r][c] = i
testr, testc = r + dr, c + dc
if 0 <= testr < n and 0 <= testc < n and spiral[testr][testc] == 0:
r, c = testr, testc
else:
dr, dc = dc, -dr
r, c = r + dr, c + dc
return spiral
def main():
tests = [
[
3,
[[1, 2, 3], [8, 9, 4], [7, 6, 5]]
],
[
5,
[[1, 2, 3, 4, 5],
[16, 17, 18, 19, 6],
[15, 24, 25, 20, 7],
[14, 23, 22, 21, 8],
[13, 12, 11, 10, 9]]
],
[
6,
[[1, 2, 3, 4, 5, 6],
[20, 21, 22, 23, 24, 7],
[19, 32, 33, 34, 25, 8],
[18, 31, 36, 35, 26, 9],
[17, 30, 29, 28, 27, 10],
[16, 15, 14, 13, 12, 11]]
]
]
for t in tests:
res = spiralNumbers(t[0])
ans = t[1]
if ans == res:
print("PASSED: spiralNumbers({}) returned {}"
.format(t[0], res))
else:
print("FAILED: spiralNumbers({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights spiral numbers problem<commit_after>#!/usr/local/bin/python
# Code Fights Spiral Numbers Problem
def spiralNumbers(n):
r, c = 0, 0 # Starting location
# Delta for row or column increments: first direction is left to right
dr, dc = 0, 1
spiral = [[0] * n for _ in range(n)]
for i in range(1, n * n + 1):
spiral[r][c] = i
testr, testc = r + dr, c + dc
if 0 <= testr < n and 0 <= testc < n and spiral[testr][testc] == 0:
r, c = testr, testc
else:
dr, dc = dc, -dr
r, c = r + dr, c + dc
return spiral
def main():
tests = [
[
3,
[[1, 2, 3], [8, 9, 4], [7, 6, 5]]
],
[
5,
[[1, 2, 3, 4, 5],
[16, 17, 18, 19, 6],
[15, 24, 25, 20, 7],
[14, 23, 22, 21, 8],
[13, 12, 11, 10, 9]]
],
[
6,
[[1, 2, 3, 4, 5, 6],
[20, 21, 22, 23, 24, 7],
[19, 32, 33, 34, 25, 8],
[18, 31, 36, 35, 26, 9],
[17, 30, 29, 28, 27, 10],
[16, 15, 14, 13, 12, 11]]
]
]
for t in tests:
res = spiralNumbers(t[0])
ans = t[1]
if ans == res:
print("PASSED: spiralNumbers({}) returned {}"
.format(t[0], res))
else:
print("FAILED: spiralNumbers({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
|
dae0ec0b70ed68a5fa6ae29225c4995256f4ec80
|
CodeFights/isBeautifulString.py
|
CodeFights/isBeautifulString.py
|
#!/usr/local/bin/python
# Code Fights Is Beautiful String Problem
import string
def isBeautifulString(inputString):
keys = string.ascii_lowercase
for i in range(1, len(keys)):
if inputString.count(keys[i]) > inputString.count(keys[i - 1]):
return False
return True
def main():
tests = [
["bbbaacdafe", True],
["aabbb", False],
["bbc", False],
["bbbaa", False],
["abcdefghijklmnopqrstuvwxyzz", False],
["abcdefghijklmnopqrstuvwxyz", True],
["abcdefghijklmnopqrstuvwxyzqwertuiopasdfghjklxcvbnm", True],
["fyudhrygiuhdfeis", False],
["zaa", False],
["zyy", False]
]
for t in tests:
res = isBeautifulString(t[0])
ans = t[1]
if ans == res:
print("PASSED: isBeautifulString({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: isBeautifulString({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights is beautiful string problem
|
Solve Code Fights is beautiful string problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights is beautiful string problem
|
#!/usr/local/bin/python
# Code Fights Is Beautiful String Problem
import string
def isBeautifulString(inputString):
keys = string.ascii_lowercase
for i in range(1, len(keys)):
if inputString.count(keys[i]) > inputString.count(keys[i - 1]):
return False
return True
def main():
tests = [
["bbbaacdafe", True],
["aabbb", False],
["bbc", False],
["bbbaa", False],
["abcdefghijklmnopqrstuvwxyzz", False],
["abcdefghijklmnopqrstuvwxyz", True],
["abcdefghijklmnopqrstuvwxyzqwertuiopasdfghjklxcvbnm", True],
["fyudhrygiuhdfeis", False],
["zaa", False],
["zyy", False]
]
for t in tests:
res = isBeautifulString(t[0])
ans = t[1]
if ans == res:
print("PASSED: isBeautifulString({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: isBeautifulString({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights is beautiful string problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Is Beautiful String Problem
import string
def isBeautifulString(inputString):
keys = string.ascii_lowercase
for i in range(1, len(keys)):
if inputString.count(keys[i]) > inputString.count(keys[i - 1]):
return False
return True
def main():
tests = [
["bbbaacdafe", True],
["aabbb", False],
["bbc", False],
["bbbaa", False],
["abcdefghijklmnopqrstuvwxyzz", False],
["abcdefghijklmnopqrstuvwxyz", True],
["abcdefghijklmnopqrstuvwxyzqwertuiopasdfghjklxcvbnm", True],
["fyudhrygiuhdfeis", False],
["zaa", False],
["zyy", False]
]
for t in tests:
res = isBeautifulString(t[0])
ans = t[1]
if ans == res:
print("PASSED: isBeautifulString({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: isBeautifulString({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights is beautiful string problem#!/usr/local/bin/python
# Code Fights Is Beautiful String Problem
import string
def isBeautifulString(inputString):
keys = string.ascii_lowercase
for i in range(1, len(keys)):
if inputString.count(keys[i]) > inputString.count(keys[i - 1]):
return False
return True
def main():
tests = [
["bbbaacdafe", True],
["aabbb", False],
["bbc", False],
["bbbaa", False],
["abcdefghijklmnopqrstuvwxyzz", False],
["abcdefghijklmnopqrstuvwxyz", True],
["abcdefghijklmnopqrstuvwxyzqwertuiopasdfghjklxcvbnm", True],
["fyudhrygiuhdfeis", False],
["zaa", False],
["zyy", False]
]
for t in tests:
res = isBeautifulString(t[0])
ans = t[1]
if ans == res:
print("PASSED: isBeautifulString({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: isBeautifulString({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights is beautiful string problem<commit_after>#!/usr/local/bin/python
# Code Fights Is Beautiful String Problem
import string
def isBeautifulString(inputString):
keys = string.ascii_lowercase
for i in range(1, len(keys)):
if inputString.count(keys[i]) > inputString.count(keys[i - 1]):
return False
return True
def main():
tests = [
["bbbaacdafe", True],
["aabbb", False],
["bbc", False],
["bbbaa", False],
["abcdefghijklmnopqrstuvwxyzz", False],
["abcdefghijklmnopqrstuvwxyz", True],
["abcdefghijklmnopqrstuvwxyzqwertuiopasdfghjklxcvbnm", True],
["fyudhrygiuhdfeis", False],
["zaa", False],
["zyy", False]
]
for t in tests:
res = isBeautifulString(t[0])
ans = t[1]
if ans == res:
print("PASSED: isBeautifulString({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: isBeautifulString({}) returned {},"
"answer: {}").format(t[0], res, ans))
if __name__ == '__main__':
main()
|
|
f53e12b1e3d939d1c98037589da81c459a61616f
|
papermill/tests/test_hdfs.py
|
papermill/tests/test_hdfs.py
|
import unittest
from unittest.mock import MagicMock, patch
from ..iorw import HDFSHandler
class MockHadoopFileSystem(MagicMock):
def ls(self, path):
return ['test1.ipynb', 'test2.ipynb']
def open(self, path, *args):
return MockHadoopFile()
class MockHadoopFile(object):
def __init__(self):
self._content = b'Content of notebook'
def __enter__(self, *args):
return self
def __exit__(self, *args):
pass
def read(self):
return self._content
def write(self, new_content):
self._content = new_content
return 1
@patch('papermill.iorw.HadoopFileSystem', side_effect=MockHadoopFileSystem())
class HDFSTest(unittest.TestCase):
def setUp(self):
self.hdfs_handler = HDFSHandler()
def test_hdfs_listdir(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.listdir("hdfs:///Projects/"), ['test1.ipynb', 'test2.ipynb'])
# Check if client is the same after calling
self.assertIs(client, self.hdfs_handler._get_client())
def test_hdfs_read(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.read("hdfs:///Projects/test1.ipynb"), b'Content of notebook')
self.assertIs(client, self.hdfs_handler._get_client())
def test_hdfs_write(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.write("hdfs:///Projects/test1.ipynb", b'New content'), 1)
self.assertIs(client, self.hdfs_handler._get_client())
|
Add test for hdfs handler
|
Add test for hdfs handler
|
Python
|
bsd-3-clause
|
nteract/papermill,nteract/papermill
|
Add test for hdfs handler
|
import unittest
from unittest.mock import MagicMock, patch
from ..iorw import HDFSHandler
class MockHadoopFileSystem(MagicMock):
def ls(self, path):
return ['test1.ipynb', 'test2.ipynb']
def open(self, path, *args):
return MockHadoopFile()
class MockHadoopFile(object):
def __init__(self):
self._content = b'Content of notebook'
def __enter__(self, *args):
return self
def __exit__(self, *args):
pass
def read(self):
return self._content
def write(self, new_content):
self._content = new_content
return 1
@patch('papermill.iorw.HadoopFileSystem', side_effect=MockHadoopFileSystem())
class HDFSTest(unittest.TestCase):
def setUp(self):
self.hdfs_handler = HDFSHandler()
def test_hdfs_listdir(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.listdir("hdfs:///Projects/"), ['test1.ipynb', 'test2.ipynb'])
# Check if client is the same after calling
self.assertIs(client, self.hdfs_handler._get_client())
def test_hdfs_read(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.read("hdfs:///Projects/test1.ipynb"), b'Content of notebook')
self.assertIs(client, self.hdfs_handler._get_client())
def test_hdfs_write(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.write("hdfs:///Projects/test1.ipynb", b'New content'), 1)
self.assertIs(client, self.hdfs_handler._get_client())
|
<commit_before><commit_msg>Add test for hdfs handler<commit_after>
|
import unittest
from unittest.mock import MagicMock, patch
from ..iorw import HDFSHandler
class MockHadoopFileSystem(MagicMock):
def ls(self, path):
return ['test1.ipynb', 'test2.ipynb']
def open(self, path, *args):
return MockHadoopFile()
class MockHadoopFile(object):
def __init__(self):
self._content = b'Content of notebook'
def __enter__(self, *args):
return self
def __exit__(self, *args):
pass
def read(self):
return self._content
def write(self, new_content):
self._content = new_content
return 1
@patch('papermill.iorw.HadoopFileSystem', side_effect=MockHadoopFileSystem())
class HDFSTest(unittest.TestCase):
def setUp(self):
self.hdfs_handler = HDFSHandler()
def test_hdfs_listdir(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.listdir("hdfs:///Projects/"), ['test1.ipynb', 'test2.ipynb'])
# Check if client is the same after calling
self.assertIs(client, self.hdfs_handler._get_client())
def test_hdfs_read(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.read("hdfs:///Projects/test1.ipynb"), b'Content of notebook')
self.assertIs(client, self.hdfs_handler._get_client())
def test_hdfs_write(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.write("hdfs:///Projects/test1.ipynb", b'New content'), 1)
self.assertIs(client, self.hdfs_handler._get_client())
|
Add test for hdfs handlerimport unittest
from unittest.mock import MagicMock, patch
from ..iorw import HDFSHandler
class MockHadoopFileSystem(MagicMock):
def ls(self, path):
return ['test1.ipynb', 'test2.ipynb']
def open(self, path, *args):
return MockHadoopFile()
class MockHadoopFile(object):
def __init__(self):
self._content = b'Content of notebook'
def __enter__(self, *args):
return self
def __exit__(self, *args):
pass
def read(self):
return self._content
def write(self, new_content):
self._content = new_content
return 1
@patch('papermill.iorw.HadoopFileSystem', side_effect=MockHadoopFileSystem())
class HDFSTest(unittest.TestCase):
def setUp(self):
self.hdfs_handler = HDFSHandler()
def test_hdfs_listdir(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.listdir("hdfs:///Projects/"), ['test1.ipynb', 'test2.ipynb'])
# Check if client is the same after calling
self.assertIs(client, self.hdfs_handler._get_client())
def test_hdfs_read(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.read("hdfs:///Projects/test1.ipynb"), b'Content of notebook')
self.assertIs(client, self.hdfs_handler._get_client())
def test_hdfs_write(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.write("hdfs:///Projects/test1.ipynb", b'New content'), 1)
self.assertIs(client, self.hdfs_handler._get_client())
|
<commit_before><commit_msg>Add test for hdfs handler<commit_after>import unittest
from unittest.mock import MagicMock, patch
from ..iorw import HDFSHandler
class MockHadoopFileSystem(MagicMock):
def ls(self, path):
return ['test1.ipynb', 'test2.ipynb']
def open(self, path, *args):
return MockHadoopFile()
class MockHadoopFile(object):
def __init__(self):
self._content = b'Content of notebook'
def __enter__(self, *args):
return self
def __exit__(self, *args):
pass
def read(self):
return self._content
def write(self, new_content):
self._content = new_content
return 1
@patch('papermill.iorw.HadoopFileSystem', side_effect=MockHadoopFileSystem())
class HDFSTest(unittest.TestCase):
def setUp(self):
self.hdfs_handler = HDFSHandler()
def test_hdfs_listdir(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.listdir("hdfs:///Projects/"), ['test1.ipynb', 'test2.ipynb'])
# Check if client is the same after calling
self.assertIs(client, self.hdfs_handler._get_client())
def test_hdfs_read(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.read("hdfs:///Projects/test1.ipynb"), b'Content of notebook')
self.assertIs(client, self.hdfs_handler._get_client())
def test_hdfs_write(self, mock_hdfs_filesystem):
client = self.hdfs_handler._get_client()
self.assertEqual(self.hdfs_handler.write("hdfs:///Projects/test1.ipynb", b'New content'), 1)
self.assertIs(client, self.hdfs_handler._get_client())
|
|
b5c9fffab5d0c2082d0fcd55967e033272c36acc
|
flickrest.py
|
flickrest.py
|
import md5, urllib
from twisted.internet import defer
from twisted.web import client
from elementtree import ElementTree
class FlickREST:
endpoint = "http://api.flickr.com/services/rest/?"
def __init__(self, api_key, secret, perms="read"):
self.api_key = api_key
self.secret = secret
self.perms = perms
self.token = None
def __sign(self, kwargs):
kwargs['api_key'] = self.api_key
# If authenticating we don't yet have a token
if self.token:
kwargs['auth_token'] = self.token
s = []
for key in kwargs.keys():
s.append("%s%s" % (key, kwargs[key]))
s.sort()
sig = md5.new(self.secret + ''.join(s)).hexdigest()
kwargs['api_sig'] = sig
def call(self, method, **kwargs):
kwargs["method"] = method
self.__sign(kwargs)
d = defer.Deferred()
def cb(data):
xml = ElementTree.XML(data.encode("utf-8"))
d.callback(xml)
# TODO: but I want to do POST...
client.getPage(FlickREST.endpoint + urllib.urlencode(kwargs)).addCallback(cb)
return d
if __name__ == "__main__":
from twisted.internet import reactor
flickr = FlickREST("c53cebd15ed936073134cec858036f1d", "7db1b8ef68979779", "read")
d = flickr.call("flickr.auth.getFrob")
def foo(p):
print p
d.addCallback(foo)
reactor.run()
|
Add prototype and incomplete REST interface
|
Add prototype and incomplete REST interface
|
Python
|
lgpl-2.1
|
rossburton/flickrest
|
Add prototype and incomplete REST interface
|
import md5, urllib
from twisted.internet import defer
from twisted.web import client
from elementtree import ElementTree
class FlickREST:
endpoint = "http://api.flickr.com/services/rest/?"
def __init__(self, api_key, secret, perms="read"):
self.api_key = api_key
self.secret = secret
self.perms = perms
self.token = None
def __sign(self, kwargs):
kwargs['api_key'] = self.api_key
# If authenticating we don't yet have a token
if self.token:
kwargs['auth_token'] = self.token
s = []
for key in kwargs.keys():
s.append("%s%s" % (key, kwargs[key]))
s.sort()
sig = md5.new(self.secret + ''.join(s)).hexdigest()
kwargs['api_sig'] = sig
def call(self, method, **kwargs):
kwargs["method"] = method
self.__sign(kwargs)
d = defer.Deferred()
def cb(data):
xml = ElementTree.XML(data.encode("utf-8"))
d.callback(xml)
# TODO: but I want to do POST...
client.getPage(FlickREST.endpoint + urllib.urlencode(kwargs)).addCallback(cb)
return d
if __name__ == "__main__":
from twisted.internet import reactor
flickr = FlickREST("c53cebd15ed936073134cec858036f1d", "7db1b8ef68979779", "read")
d = flickr.call("flickr.auth.getFrob")
def foo(p):
print p
d.addCallback(foo)
reactor.run()
|
<commit_before><commit_msg>Add prototype and incomplete REST interface<commit_after>
|
import md5, urllib
from twisted.internet import defer
from twisted.web import client
from elementtree import ElementTree
class FlickREST:
endpoint = "http://api.flickr.com/services/rest/?"
def __init__(self, api_key, secret, perms="read"):
self.api_key = api_key
self.secret = secret
self.perms = perms
self.token = None
def __sign(self, kwargs):
kwargs['api_key'] = self.api_key
# If authenticating we don't yet have a token
if self.token:
kwargs['auth_token'] = self.token
s = []
for key in kwargs.keys():
s.append("%s%s" % (key, kwargs[key]))
s.sort()
sig = md5.new(self.secret + ''.join(s)).hexdigest()
kwargs['api_sig'] = sig
def call(self, method, **kwargs):
kwargs["method"] = method
self.__sign(kwargs)
d = defer.Deferred()
def cb(data):
xml = ElementTree.XML(data.encode("utf-8"))
d.callback(xml)
# TODO: but I want to do POST...
client.getPage(FlickREST.endpoint + urllib.urlencode(kwargs)).addCallback(cb)
return d
if __name__ == "__main__":
from twisted.internet import reactor
flickr = FlickREST("c53cebd15ed936073134cec858036f1d", "7db1b8ef68979779", "read")
d = flickr.call("flickr.auth.getFrob")
def foo(p):
print p
d.addCallback(foo)
reactor.run()
|
Add prototype and incomplete REST interfaceimport md5, urllib
from twisted.internet import defer
from twisted.web import client
from elementtree import ElementTree
class FlickREST:
endpoint = "http://api.flickr.com/services/rest/?"
def __init__(self, api_key, secret, perms="read"):
self.api_key = api_key
self.secret = secret
self.perms = perms
self.token = None
def __sign(self, kwargs):
kwargs['api_key'] = self.api_key
# If authenticating we don't yet have a token
if self.token:
kwargs['auth_token'] = self.token
s = []
for key in kwargs.keys():
s.append("%s%s" % (key, kwargs[key]))
s.sort()
sig = md5.new(self.secret + ''.join(s)).hexdigest()
kwargs['api_sig'] = sig
def call(self, method, **kwargs):
kwargs["method"] = method
self.__sign(kwargs)
d = defer.Deferred()
def cb(data):
xml = ElementTree.XML(data.encode("utf-8"))
d.callback(xml)
# TODO: but I want to do POST...
client.getPage(FlickREST.endpoint + urllib.urlencode(kwargs)).addCallback(cb)
return d
if __name__ == "__main__":
from twisted.internet import reactor
flickr = FlickREST("c53cebd15ed936073134cec858036f1d", "7db1b8ef68979779", "read")
d = flickr.call("flickr.auth.getFrob")
def foo(p):
print p
d.addCallback(foo)
reactor.run()
|
<commit_before><commit_msg>Add prototype and incomplete REST interface<commit_after>import md5, urllib
from twisted.internet import defer
from twisted.web import client
from elementtree import ElementTree
class FlickREST:
endpoint = "http://api.flickr.com/services/rest/?"
def __init__(self, api_key, secret, perms="read"):
self.api_key = api_key
self.secret = secret
self.perms = perms
self.token = None
def __sign(self, kwargs):
kwargs['api_key'] = self.api_key
# If authenticating we don't yet have a token
if self.token:
kwargs['auth_token'] = self.token
s = []
for key in kwargs.keys():
s.append("%s%s" % (key, kwargs[key]))
s.sort()
sig = md5.new(self.secret + ''.join(s)).hexdigest()
kwargs['api_sig'] = sig
def call(self, method, **kwargs):
kwargs["method"] = method
self.__sign(kwargs)
d = defer.Deferred()
def cb(data):
xml = ElementTree.XML(data.encode("utf-8"))
d.callback(xml)
# TODO: but I want to do POST...
client.getPage(FlickREST.endpoint + urllib.urlencode(kwargs)).addCallback(cb)
return d
if __name__ == "__main__":
from twisted.internet import reactor
flickr = FlickREST("c53cebd15ed936073134cec858036f1d", "7db1b8ef68979779", "read")
d = flickr.call("flickr.auth.getFrob")
def foo(p):
print p
d.addCallback(foo)
reactor.run()
|
|
0343ecb6bbc239a3e2542cbb0257f66bfb34a2b1
|
tests/test_rietveld.py
|
tests/test_rietveld.py
|
from __future__ import absolute_import, print_function
import unittest
import os
from addie.addiedriver import AddieDriver
from tests import DATA_DIR
TOF = 'TOF'
D_SPACING = 'dSpacing'
Q_SPACE = 'MomentumTransfer'
class BraggData(unittest.TestCase):
def setUp(self):
self.filename = os.path.join(DATA_DIR, 'NOM_127827.gsa')
def test_get_data(self):
driver = AddieDriver()
# load the data
wkspname = driver.load_bragg_file(self.filename)
# create a bunch of individual spectra
# FIXME should just use the original workspace itself
groupWkspName, banks_list, bank_angles = driver.split_to_single_bank(wkspname)
print(wkspname, groupWkspName)
for units in (TOF, D_SPACING, Q_SPACE):
for wkspIndex in range(6):
x, y, dy = driver.get_bragg_data(groupWkspName, wkspIndex + 1, units)
self.assertEqual(len(x), len(y))
self.assertEqual(len(y), len(dy))
self.assertLess(x[0], x[-1], 'xmin[{}] >= xmax[{}]'.format(x[0], x[-1]))
if units == TOF: # these values are copied from the GSAS file
if wkspIndex == 0:
self.assertEqual(x[0], 743.140027596)
self.assertEqual(x[-1], 8971.042698148)
elif wkspIndex == 1:
self.assertEqual(x[0], 887.289527377)
self.assertEqual(x[-1], 17966.432721196)
elif wkspIndex == 2:
self.assertEqual(x[0], 1009.427358717)
self.assertEqual(x[-1], 19058.487769870)
elif wkspIndex == 3:
self.assertEqual(x[0], 1175.684429098)
self.assertEqual(x[-1], 17176.602366475)
elif wkspIndex == 4:
self.assertEqual(x[0], 1288.635270161)
self.assertEqual(x[-1], 15260.397565064)
elif wkspIndex == 5:
self.assertEqual(x[0], 858.293757585)
self.assertEqual(x[-1], 10270.673982962)
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
Add unit tests for loading gsas data
|
Add unit tests for loading gsas data
|
Python
|
mit
|
neutrons/FastGR,neutrons/FastGR,neutrons/FastGR
|
Add unit tests for loading gsas data
|
from __future__ import absolute_import, print_function
import unittest
import os
from addie.addiedriver import AddieDriver
from tests import DATA_DIR
TOF = 'TOF'
D_SPACING = 'dSpacing'
Q_SPACE = 'MomentumTransfer'
class BraggData(unittest.TestCase):
def setUp(self):
self.filename = os.path.join(DATA_DIR, 'NOM_127827.gsa')
def test_get_data(self):
driver = AddieDriver()
# load the data
wkspname = driver.load_bragg_file(self.filename)
# create a bunch of individual spectra
# FIXME should just use the original workspace itself
groupWkspName, banks_list, bank_angles = driver.split_to_single_bank(wkspname)
print(wkspname, groupWkspName)
for units in (TOF, D_SPACING, Q_SPACE):
for wkspIndex in range(6):
x, y, dy = driver.get_bragg_data(groupWkspName, wkspIndex + 1, units)
self.assertEqual(len(x), len(y))
self.assertEqual(len(y), len(dy))
self.assertLess(x[0], x[-1], 'xmin[{}] >= xmax[{}]'.format(x[0], x[-1]))
if units == TOF: # these values are copied from the GSAS file
if wkspIndex == 0:
self.assertEqual(x[0], 743.140027596)
self.assertEqual(x[-1], 8971.042698148)
elif wkspIndex == 1:
self.assertEqual(x[0], 887.289527377)
self.assertEqual(x[-1], 17966.432721196)
elif wkspIndex == 2:
self.assertEqual(x[0], 1009.427358717)
self.assertEqual(x[-1], 19058.487769870)
elif wkspIndex == 3:
self.assertEqual(x[0], 1175.684429098)
self.assertEqual(x[-1], 17176.602366475)
elif wkspIndex == 4:
self.assertEqual(x[0], 1288.635270161)
self.assertEqual(x[-1], 15260.397565064)
elif wkspIndex == 5:
self.assertEqual(x[0], 858.293757585)
self.assertEqual(x[-1], 10270.673982962)
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
<commit_before><commit_msg>Add unit tests for loading gsas data<commit_after>
|
from __future__ import absolute_import, print_function
import unittest
import os
from addie.addiedriver import AddieDriver
from tests import DATA_DIR
TOF = 'TOF'
D_SPACING = 'dSpacing'
Q_SPACE = 'MomentumTransfer'
class BraggData(unittest.TestCase):
def setUp(self):
self.filename = os.path.join(DATA_DIR, 'NOM_127827.gsa')
def test_get_data(self):
driver = AddieDriver()
# load the data
wkspname = driver.load_bragg_file(self.filename)
# create a bunch of individual spectra
# FIXME should just use the original workspace itself
groupWkspName, banks_list, bank_angles = driver.split_to_single_bank(wkspname)
print(wkspname, groupWkspName)
for units in (TOF, D_SPACING, Q_SPACE):
for wkspIndex in range(6):
x, y, dy = driver.get_bragg_data(groupWkspName, wkspIndex + 1, units)
self.assertEqual(len(x), len(y))
self.assertEqual(len(y), len(dy))
self.assertLess(x[0], x[-1], 'xmin[{}] >= xmax[{}]'.format(x[0], x[-1]))
if units == TOF: # these values are copied from the GSAS file
if wkspIndex == 0:
self.assertEqual(x[0], 743.140027596)
self.assertEqual(x[-1], 8971.042698148)
elif wkspIndex == 1:
self.assertEqual(x[0], 887.289527377)
self.assertEqual(x[-1], 17966.432721196)
elif wkspIndex == 2:
self.assertEqual(x[0], 1009.427358717)
self.assertEqual(x[-1], 19058.487769870)
elif wkspIndex == 3:
self.assertEqual(x[0], 1175.684429098)
self.assertEqual(x[-1], 17176.602366475)
elif wkspIndex == 4:
self.assertEqual(x[0], 1288.635270161)
self.assertEqual(x[-1], 15260.397565064)
elif wkspIndex == 5:
self.assertEqual(x[0], 858.293757585)
self.assertEqual(x[-1], 10270.673982962)
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
Add unit tests for loading gsas datafrom __future__ import absolute_import, print_function
import unittest
import os
from addie.addiedriver import AddieDriver
from tests import DATA_DIR
TOF = 'TOF'
D_SPACING = 'dSpacing'
Q_SPACE = 'MomentumTransfer'
class BraggData(unittest.TestCase):
def setUp(self):
self.filename = os.path.join(DATA_DIR, 'NOM_127827.gsa')
def test_get_data(self):
driver = AddieDriver()
# load the data
wkspname = driver.load_bragg_file(self.filename)
# create a bunch of individual spectra
# FIXME should just use the original workspace itself
groupWkspName, banks_list, bank_angles = driver.split_to_single_bank(wkspname)
print(wkspname, groupWkspName)
for units in (TOF, D_SPACING, Q_SPACE):
for wkspIndex in range(6):
x, y, dy = driver.get_bragg_data(groupWkspName, wkspIndex + 1, units)
self.assertEqual(len(x), len(y))
self.assertEqual(len(y), len(dy))
self.assertLess(x[0], x[-1], 'xmin[{}] >= xmax[{}]'.format(x[0], x[-1]))
if units == TOF: # these values are copied from the GSAS file
if wkspIndex == 0:
self.assertEqual(x[0], 743.140027596)
self.assertEqual(x[-1], 8971.042698148)
elif wkspIndex == 1:
self.assertEqual(x[0], 887.289527377)
self.assertEqual(x[-1], 17966.432721196)
elif wkspIndex == 2:
self.assertEqual(x[0], 1009.427358717)
self.assertEqual(x[-1], 19058.487769870)
elif wkspIndex == 3:
self.assertEqual(x[0], 1175.684429098)
self.assertEqual(x[-1], 17176.602366475)
elif wkspIndex == 4:
self.assertEqual(x[0], 1288.635270161)
self.assertEqual(x[-1], 15260.397565064)
elif wkspIndex == 5:
self.assertEqual(x[0], 858.293757585)
self.assertEqual(x[-1], 10270.673982962)
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
<commit_before><commit_msg>Add unit tests for loading gsas data<commit_after>from __future__ import absolute_import, print_function
import unittest
import os
from addie.addiedriver import AddieDriver
from tests import DATA_DIR
TOF = 'TOF'
D_SPACING = 'dSpacing'
Q_SPACE = 'MomentumTransfer'
class BraggData(unittest.TestCase):
def setUp(self):
self.filename = os.path.join(DATA_DIR, 'NOM_127827.gsa')
def test_get_data(self):
driver = AddieDriver()
# load the data
wkspname = driver.load_bragg_file(self.filename)
# create a bunch of individual spectra
# FIXME should just use the original workspace itself
groupWkspName, banks_list, bank_angles = driver.split_to_single_bank(wkspname)
print(wkspname, groupWkspName)
for units in (TOF, D_SPACING, Q_SPACE):
for wkspIndex in range(6):
x, y, dy = driver.get_bragg_data(groupWkspName, wkspIndex + 1, units)
self.assertEqual(len(x), len(y))
self.assertEqual(len(y), len(dy))
self.assertLess(x[0], x[-1], 'xmin[{}] >= xmax[{}]'.format(x[0], x[-1]))
if units == TOF: # these values are copied from the GSAS file
if wkspIndex == 0:
self.assertEqual(x[0], 743.140027596)
self.assertEqual(x[-1], 8971.042698148)
elif wkspIndex == 1:
self.assertEqual(x[0], 887.289527377)
self.assertEqual(x[-1], 17966.432721196)
elif wkspIndex == 2:
self.assertEqual(x[0], 1009.427358717)
self.assertEqual(x[-1], 19058.487769870)
elif wkspIndex == 3:
self.assertEqual(x[0], 1175.684429098)
self.assertEqual(x[-1], 17176.602366475)
elif wkspIndex == 4:
self.assertEqual(x[0], 1288.635270161)
self.assertEqual(x[-1], 15260.397565064)
elif wkspIndex == 5:
self.assertEqual(x[0], 858.293757585)
self.assertEqual(x[-1], 10270.673982962)
if __name__ == '__main__':
unittest.main() # pragma: no cover
|
|
ade6ab718b6fe29808e37aa9b852d502cc033980
|
haml/util.py
|
haml/util.py
|
def extract_haml(fileobj, keywords, comment_tags, options):
""" babel translation token extract function for haml files """
import haml
from mako import lexer, parsetree
from mako.ext.babelplugin import extract_nodes
encoding = options.get('input_encoding', options.get('encoding', None))
template_node = lexer.Lexer(haml.preprocessor(fileobj.read()), input_encoding=encoding).parse()
for extracted in extract_nodes(template_node.get_children(), keywords, comment_tags, options):
yield extracted
|
Add babel translation string extractor function
|
Add babel translation string extractor function
haml.util.extract_haml adds ability for babel to
recognize and extract gettext translation tokens from haml files
|
Python
|
bsd-3-clause
|
mikeboers/PyHAML
|
Add babel translation string extractor function
haml.util.extract_haml adds ability for babel to
recognize and extract gettext translation tokens from haml files
|
def extract_haml(fileobj, keywords, comment_tags, options):
""" babel translation token extract function for haml files """
import haml
from mako import lexer, parsetree
from mako.ext.babelplugin import extract_nodes
encoding = options.get('input_encoding', options.get('encoding', None))
template_node = lexer.Lexer(haml.preprocessor(fileobj.read()), input_encoding=encoding).parse()
for extracted in extract_nodes(template_node.get_children(), keywords, comment_tags, options):
yield extracted
|
<commit_before><commit_msg>Add babel translation string extractor function
haml.util.extract_haml adds ability for babel to
recognize and extract gettext translation tokens from haml files<commit_after>
|
def extract_haml(fileobj, keywords, comment_tags, options):
""" babel translation token extract function for haml files """
import haml
from mako import lexer, parsetree
from mako.ext.babelplugin import extract_nodes
encoding = options.get('input_encoding', options.get('encoding', None))
template_node = lexer.Lexer(haml.preprocessor(fileobj.read()), input_encoding=encoding).parse()
for extracted in extract_nodes(template_node.get_children(), keywords, comment_tags, options):
yield extracted
|
Add babel translation string extractor function
haml.util.extract_haml adds ability for babel to
recognize and extract gettext translation tokens from haml files
def extract_haml(fileobj, keywords, comment_tags, options):
""" babel translation token extract function for haml files """
import haml
from mako import lexer, parsetree
from mako.ext.babelplugin import extract_nodes
encoding = options.get('input_encoding', options.get('encoding', None))
template_node = lexer.Lexer(haml.preprocessor(fileobj.read()), input_encoding=encoding).parse()
for extracted in extract_nodes(template_node.get_children(), keywords, comment_tags, options):
yield extracted
|
<commit_before><commit_msg>Add babel translation string extractor function
haml.util.extract_haml adds ability for babel to
recognize and extract gettext translation tokens from haml files<commit_after>
def extract_haml(fileobj, keywords, comment_tags, options):
""" babel translation token extract function for haml files """
import haml
from mako import lexer, parsetree
from mako.ext.babelplugin import extract_nodes
encoding = options.get('input_encoding', options.get('encoding', None))
template_node = lexer.Lexer(haml.preprocessor(fileobj.read()), input_encoding=encoding).parse()
for extracted in extract_nodes(template_node.get_children(), keywords, comment_tags, options):
yield extracted
|
|
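For context on wiring an extractor like the one in the record above into Babel's message-extraction pipeline: Babel discovers custom extraction methods through the `babel.extractors` entry point group, and a mapping file then routes template files to that method. A minimal sketch, assuming the package layout from the record (haml.util.extract_haml in the PyHAML project); the file glob and encoding value are illustrative, not part of the original commit:

# setup.py (excerpt)
from setuptools import setup

setup(
    name='PyHAML',
    entry_points="""
    [babel.extractors]
    haml = haml.util:extract_haml
    """,
)

# babel mapping file (e.g. babel.cfg)
# [haml: **/templates/**.haml]
# input_encoding = utf-8

The `input_encoding` option is read by extract_haml from its `options` argument, as shown in the function body above.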
87179a4c040400aa7f5cc868744ffd298920caa6
|
scripts/data_download/secex/create_all_files.py
|
scripts/data_download/secex/create_all_files.py
|
import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/secex/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2002, 2017):
logging.info("python scripts/data_download/secex/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/secex/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
Add file to create all files to secex.
|
Add file to create all files to secex.
|
Python
|
mit
|
DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site
|
Add file to create all files to secex.
|
import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/secex/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2002, 2017):
logging.info("python scripts/data_download/secex/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/secex/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
<commit_before><commit_msg>Add file to create all files to secex.<commit_after>
|
import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/secex/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2002, 2017):
logging.info("python scripts/data_download/secex/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/secex/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
Add file to create all files to secex.import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/secex/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2002, 2017):
logging.info("python scripts/data_download/secex/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/secex/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
<commit_before><commit_msg>Add file to create all files to secex.<commit_after>import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/secex/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2002, 2017):
logging.info("python scripts/data_download/secex/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/secex/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
|
ce7994e255cd83d84259c369d1823aa7ac8c30cd
|
sahara/tests/unit/plugins/cdh/test_versionfactory.py
|
sahara/tests/unit/plugins/cdh/test_versionfactory.py
|
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import abstractversionhandler as avh
from sahara.plugins.cdh import versionfactory as vf
from sahara.tests.unit import base
class VersionFactoryTestCase(base.SaharaTestCase):
def test_get_instance(self):
self.assertFalse(vf.VersionFactory.initialized)
factory = vf.VersionFactory.get_instance()
self.assertIsInstance(factory, vf.VersionFactory)
self.assertTrue(vf.VersionFactory.initialized)
def test_get_versions(self):
factory = vf.VersionFactory.get_instance()
versions = factory.get_versions()
expected_versions = self.get_support_versions()
self.assertEqual(expected_versions, versions)
def test_get_version_handler(self):
factory = vf.VersionFactory.get_instance()
versions = self.get_support_versions()
for version in versions:
hander = factory.get_version_handler(version)
self.assertIsInstance(hander, avh.AbstractVersionHandler)
def get_support_versions(self):
return ['5', '5.3.0', '5.4.0']
|
Add test cases for CDH plugin versionfactory
|
Add test cases for CDH plugin versionfactory
Changes:
* add test cases for CDH plugin versionfactory
* add a function to get supported versions into utils
partially implements bp: cdh-plugin-refactoring
Change-Id: I3b60c65ab8a6bb7c296b2a8a1ebbfd4bacfbe6b4
|
Python
|
apache-2.0
|
openstack/sahara,egafford/sahara,tellesnobrega/sahara,openstack/sahara,tellesnobrega/sahara,egafford/sahara
|
Add test cases for CDH plugin versionfactory
Changes:
* add test cases for CDH plugin versionfactory
* add a function to get supported versions into utils
partially implements bp: cdh-plugin-refactoring
Change-Id: I3b60c65ab8a6bb7c296b2a8a1ebbfd4bacfbe6b4
|
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import abstractversionhandler as avh
from sahara.plugins.cdh import versionfactory as vf
from sahara.tests.unit import base
class VersionFactoryTestCase(base.SaharaTestCase):
def test_get_instance(self):
self.assertFalse(vf.VersionFactory.initialized)
factory = vf.VersionFactory.get_instance()
self.assertIsInstance(factory, vf.VersionFactory)
self.assertTrue(vf.VersionFactory.initialized)
def test_get_versions(self):
factory = vf.VersionFactory.get_instance()
versions = factory.get_versions()
expected_versions = self.get_support_versions()
self.assertEqual(expected_versions, versions)
def test_get_version_handler(self):
factory = vf.VersionFactory.get_instance()
versions = self.get_support_versions()
for version in versions:
hander = factory.get_version_handler(version)
self.assertIsInstance(hander, avh.AbstractVersionHandler)
def get_support_versions(self):
return ['5', '5.3.0', '5.4.0']
|
<commit_before><commit_msg>Add test cases for CDH plugin versionfactory
Changes:
* add test cases for CDH plugin versionfactory
* add a function to get supported versions into utils
partially implements bp: cdh-plugin-refactoring
Change-Id: I3b60c65ab8a6bb7c296b2a8a1ebbfd4bacfbe6b4<commit_after>
|
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import abstractversionhandler as avh
from sahara.plugins.cdh import versionfactory as vf
from sahara.tests.unit import base
class VersionFactoryTestCase(base.SaharaTestCase):
def test_get_instance(self):
self.assertFalse(vf.VersionFactory.initialized)
factory = vf.VersionFactory.get_instance()
self.assertIsInstance(factory, vf.VersionFactory)
self.assertTrue(vf.VersionFactory.initialized)
def test_get_versions(self):
factory = vf.VersionFactory.get_instance()
versions = factory.get_versions()
expected_versions = self.get_support_versions()
self.assertEqual(expected_versions, versions)
def test_get_version_handler(self):
factory = vf.VersionFactory.get_instance()
versions = self.get_support_versions()
for version in versions:
hander = factory.get_version_handler(version)
self.assertIsInstance(hander, avh.AbstractVersionHandler)
def get_support_versions(self):
return ['5', '5.3.0', '5.4.0']
|
Add test cases for CDH plugin versionfactory
Changes:
* add test cases for CDH plugin versionfactory
* add a function to get supported versions into utils
partially implements bp: cdh-plugin-refactoring
Change-Id: I3b60c65ab8a6bb7c296b2a8a1ebbfd4bacfbe6b4# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import abstractversionhandler as avh
from sahara.plugins.cdh import versionfactory as vf
from sahara.tests.unit import base
class VersionFactoryTestCase(base.SaharaTestCase):
def test_get_instance(self):
self.assertFalse(vf.VersionFactory.initialized)
factory = vf.VersionFactory.get_instance()
self.assertIsInstance(factory, vf.VersionFactory)
self.assertTrue(vf.VersionFactory.initialized)
def test_get_versions(self):
factory = vf.VersionFactory.get_instance()
versions = factory.get_versions()
expected_versions = self.get_support_versions()
self.assertEqual(expected_versions, versions)
def test_get_version_handler(self):
factory = vf.VersionFactory.get_instance()
versions = self.get_support_versions()
for version in versions:
hander = factory.get_version_handler(version)
self.assertIsInstance(hander, avh.AbstractVersionHandler)
def get_support_versions(self):
return ['5', '5.3.0', '5.4.0']
|
<commit_before><commit_msg>Add test cases for CDH plugin versionfactory
Changes:
* add test cases for CDH plugin versionfactory
* add a function to get supported versions into utils
partially implements bp: cdh-plugin-refactoring
Change-Id: I3b60c65ab8a6bb7c296b2a8a1ebbfd4bacfbe6b4<commit_after># Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.cdh import abstractversionhandler as avh
from sahara.plugins.cdh import versionfactory as vf
from sahara.tests.unit import base
class VersionFactoryTestCase(base.SaharaTestCase):
def test_get_instance(self):
self.assertFalse(vf.VersionFactory.initialized)
factory = vf.VersionFactory.get_instance()
self.assertIsInstance(factory, vf.VersionFactory)
self.assertTrue(vf.VersionFactory.initialized)
def test_get_versions(self):
factory = vf.VersionFactory.get_instance()
versions = factory.get_versions()
expected_versions = self.get_support_versions()
self.assertEqual(expected_versions, versions)
def test_get_version_handler(self):
factory = vf.VersionFactory.get_instance()
versions = self.get_support_versions()
for version in versions:
hander = factory.get_version_handler(version)
self.assertIsInstance(hander, avh.AbstractVersionHandler)
def get_support_versions(self):
return ['5', '5.3.0', '5.4.0']
|
|
32bb59d990a00a288c083d23a32b179e61486aff
|
generic/test_client.py
|
generic/test_client.py
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httpretty
from keystoneclient.generic import client
from keystoneclient.openstack.common import jsonutils
from keystoneclient.tests import utils
BASE_HOST = 'http://keystone.example.com'
BASE_URL = "%s:5000/" % BASE_HOST
V2_URL = "%sv2.0" % BASE_URL
EXTENSION_NAMESPACE = "http://docs.openstack.org/identity/api/ext/OS-FAKE/v1.0"
EXTENSION_DESCRIBED = {"href": "https://github.com/openstack/identity-api",
"rel": "describedby",
"type": "text/html"}
EXTENSION_ALIAS_FOO = "OS-FAKE-FOO"
EXTENSION_NAME_FOO = "OpenStack Keystone Fake Extension Foo"
EXTENSION_FOO = {"alias": EXTENSION_ALIAS_FOO,
"description": "Fake Foo extension to V2.0 API.",
"links": [EXTENSION_DESCRIBED],
"name": EXTENSION_NAME_FOO,
"namespace": EXTENSION_NAMESPACE,
"updated": '2014-01-08T00:00:00Z'}
EXTENSION_ALIAS_BAR = "OS-FAKE-BAR"
EXTENSION_NAME_BAR = "OpenStack Keystone Fake Extension Bar"
EXTENSION_BAR = {"alias": EXTENSION_ALIAS_BAR,
"description": "Fake Bar extension to V2.0 API.",
"links": [EXTENSION_DESCRIBED],
"name": EXTENSION_NAME_BAR,
"namespace": EXTENSION_NAMESPACE,
"updated": '2014-01-08T00:00:00Z'}
def _create_extension_list(extensions):
return jsonutils.dumps({'extensions': {'values': extensions}})
EXTENSION_LIST = _create_extension_list([EXTENSION_FOO, EXTENSION_BAR])
@httpretty.activate
class ClientDiscoveryTests(utils.TestCase):
def test_discover_extensions_v2(self):
httpretty.register_uri(httpretty.GET, "%s/extensions" % V2_URL,
body=EXTENSION_LIST)
extensions = client.Client().discover_extensions(url=V2_URL)
self.assertIn(EXTENSION_ALIAS_FOO, extensions)
self.assertEqual(extensions[EXTENSION_ALIAS_FOO], EXTENSION_NAME_FOO)
self.assertIn(EXTENSION_ALIAS_BAR, extensions)
self.assertEqual(extensions[EXTENSION_ALIAS_BAR], EXTENSION_NAME_BAR)
|
Fix discover command failed to read extension list issue
|
Fix discover command failed to read extension list issue
Fix the key error which caused discover command failed to read the
response body of extension list result. This change also added test
cases to cover the use case of generic client extension discovery.
Change-Id: Id687f8d73cead28f594de00d3b5ff9086558947b
Closes-Bug: #1266710
|
Python
|
apache-2.0
|
varunarya10/keystonemiddleware
|
Fix discover command failed to read extension list issue
Fix the key error which caused discover command failed to read the
response body of extension list result. This change also added test
cases to cover the use case of generic client extension discovery.
Change-Id: Id687f8d73cead28f594de00d3b5ff9086558947b
Closes-Bug: #1266710
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httpretty
from keystoneclient.generic import client
from keystoneclient.openstack.common import jsonutils
from keystoneclient.tests import utils
BASE_HOST = 'http://keystone.example.com'
BASE_URL = "%s:5000/" % BASE_HOST
V2_URL = "%sv2.0" % BASE_URL
EXTENSION_NAMESPACE = "http://docs.openstack.org/identity/api/ext/OS-FAKE/v1.0"
EXTENSION_DESCRIBED = {"href": "https://github.com/openstack/identity-api",
"rel": "describedby",
"type": "text/html"}
EXTENSION_ALIAS_FOO = "OS-FAKE-FOO"
EXTENSION_NAME_FOO = "OpenStack Keystone Fake Extension Foo"
EXTENSION_FOO = {"alias": EXTENSION_ALIAS_FOO,
"description": "Fake Foo extension to V2.0 API.",
"links": [EXTENSION_DESCRIBED],
"name": EXTENSION_NAME_FOO,
"namespace": EXTENSION_NAMESPACE,
"updated": '2014-01-08T00:00:00Z'}
EXTENSION_ALIAS_BAR = "OS-FAKE-BAR"
EXTENSION_NAME_BAR = "OpenStack Keystone Fake Extension Bar"
EXTENSION_BAR = {"alias": EXTENSION_ALIAS_BAR,
"description": "Fake Bar extension to V2.0 API.",
"links": [EXTENSION_DESCRIBED],
"name": EXTENSION_NAME_BAR,
"namespace": EXTENSION_NAMESPACE,
"updated": '2014-01-08T00:00:00Z'}
def _create_extension_list(extensions):
return jsonutils.dumps({'extensions': {'values': extensions}})
EXTENSION_LIST = _create_extension_list([EXTENSION_FOO, EXTENSION_BAR])
@httpretty.activate
class ClientDiscoveryTests(utils.TestCase):
def test_discover_extensions_v2(self):
httpretty.register_uri(httpretty.GET, "%s/extensions" % V2_URL,
body=EXTENSION_LIST)
extensions = client.Client().discover_extensions(url=V2_URL)
self.assertIn(EXTENSION_ALIAS_FOO, extensions)
self.assertEqual(extensions[EXTENSION_ALIAS_FOO], EXTENSION_NAME_FOO)
self.assertIn(EXTENSION_ALIAS_BAR, extensions)
self.assertEqual(extensions[EXTENSION_ALIAS_BAR], EXTENSION_NAME_BAR)
|
<commit_before><commit_msg>Fix discover command failed to read extension list issue
Fix the key error which caused discover command failed to read the
response body of extension list result. This change also added test
cases to cover the use case of generic client extension discovery.
Change-Id: Id687f8d73cead28f594de00d3b5ff9086558947b
Closes-Bug: #1266710<commit_after>
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httpretty
from keystoneclient.generic import client
from keystoneclient.openstack.common import jsonutils
from keystoneclient.tests import utils
BASE_HOST = 'http://keystone.example.com'
BASE_URL = "%s:5000/" % BASE_HOST
V2_URL = "%sv2.0" % BASE_URL
EXTENSION_NAMESPACE = "http://docs.openstack.org/identity/api/ext/OS-FAKE/v1.0"
EXTENSION_DESCRIBED = {"href": "https://github.com/openstack/identity-api",
"rel": "describedby",
"type": "text/html"}
EXTENSION_ALIAS_FOO = "OS-FAKE-FOO"
EXTENSION_NAME_FOO = "OpenStack Keystone Fake Extension Foo"
EXTENSION_FOO = {"alias": EXTENSION_ALIAS_FOO,
"description": "Fake Foo extension to V2.0 API.",
"links": [EXTENSION_DESCRIBED],
"name": EXTENSION_NAME_FOO,
"namespace": EXTENSION_NAMESPACE,
"updated": '2014-01-08T00:00:00Z'}
EXTENSION_ALIAS_BAR = "OS-FAKE-BAR"
EXTENSION_NAME_BAR = "OpenStack Keystone Fake Extension Bar"
EXTENSION_BAR = {"alias": EXTENSION_ALIAS_BAR,
"description": "Fake Bar extension to V2.0 API.",
"links": [EXTENSION_DESCRIBED],
"name": EXTENSION_NAME_BAR,
"namespace": EXTENSION_NAMESPACE,
"updated": '2014-01-08T00:00:00Z'}
def _create_extension_list(extensions):
return jsonutils.dumps({'extensions': {'values': extensions}})
EXTENSION_LIST = _create_extension_list([EXTENSION_FOO, EXTENSION_BAR])
@httpretty.activate
class ClientDiscoveryTests(utils.TestCase):
def test_discover_extensions_v2(self):
httpretty.register_uri(httpretty.GET, "%s/extensions" % V2_URL,
body=EXTENSION_LIST)
extensions = client.Client().discover_extensions(url=V2_URL)
self.assertIn(EXTENSION_ALIAS_FOO, extensions)
self.assertEqual(extensions[EXTENSION_ALIAS_FOO], EXTENSION_NAME_FOO)
self.assertIn(EXTENSION_ALIAS_BAR, extensions)
self.assertEqual(extensions[EXTENSION_ALIAS_BAR], EXTENSION_NAME_BAR)
|
Fix discover command failed to read extension list issue
Fix the key error which caused the discover command to fail to read the
response body of the extension list result. This change also added test
cases to cover the use case of generic client extension discovery.
Change-Id: Id687f8d73cead28f594de00d3b5ff9086558947b
Closes-Bug: #1266710# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httpretty
from keystoneclient.generic import client
from keystoneclient.openstack.common import jsonutils
from keystoneclient.tests import utils
BASE_HOST = 'http://keystone.example.com'
BASE_URL = "%s:5000/" % BASE_HOST
V2_URL = "%sv2.0" % BASE_URL
EXTENSION_NAMESPACE = "http://docs.openstack.org/identity/api/ext/OS-FAKE/v1.0"
EXTENSION_DESCRIBED = {"href": "https://github.com/openstack/identity-api",
"rel": "describedby",
"type": "text/html"}
EXTENSION_ALIAS_FOO = "OS-FAKE-FOO"
EXTENSION_NAME_FOO = "OpenStack Keystone Fake Extension Foo"
EXTENSION_FOO = {"alias": EXTENSION_ALIAS_FOO,
"description": "Fake Foo extension to V2.0 API.",
"links": [EXTENSION_DESCRIBED],
"name": EXTENSION_NAME_FOO,
"namespace": EXTENSION_NAMESPACE,
"updated": '2014-01-08T00:00:00Z'}
EXTENSION_ALIAS_BAR = "OS-FAKE-BAR"
EXTENSION_NAME_BAR = "OpenStack Keystone Fake Extension Bar"
EXTENSION_BAR = {"alias": EXTENSION_ALIAS_BAR,
"description": "Fake Bar extension to V2.0 API.",
"links": [EXTENSION_DESCRIBED],
"name": EXTENSION_NAME_BAR,
"namespace": EXTENSION_NAMESPACE,
"updated": '2014-01-08T00:00:00Z'}
def _create_extension_list(extensions):
return jsonutils.dumps({'extensions': {'values': extensions}})
EXTENSION_LIST = _create_extension_list([EXTENSION_FOO, EXTENSION_BAR])
@httpretty.activate
class ClientDiscoveryTests(utils.TestCase):
def test_discover_extensions_v2(self):
httpretty.register_uri(httpretty.GET, "%s/extensions" % V2_URL,
body=EXTENSION_LIST)
extensions = client.Client().discover_extensions(url=V2_URL)
self.assertIn(EXTENSION_ALIAS_FOO, extensions)
self.assertEqual(extensions[EXTENSION_ALIAS_FOO], EXTENSION_NAME_FOO)
self.assertIn(EXTENSION_ALIAS_BAR, extensions)
self.assertEqual(extensions[EXTENSION_ALIAS_BAR], EXTENSION_NAME_BAR)
|
<commit_before><commit_msg>Fix discover command failed to read extension list issue
Fix the key error which caused the discover command to fail to read the
response body of the extension list result. This change also added test
cases to cover the use case of generic client extension discovery.
Change-Id: Id687f8d73cead28f594de00d3b5ff9086558947b
Closes-Bug: #1266710<commit_after># Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httpretty
from keystoneclient.generic import client
from keystoneclient.openstack.common import jsonutils
from keystoneclient.tests import utils
BASE_HOST = 'http://keystone.example.com'
BASE_URL = "%s:5000/" % BASE_HOST
V2_URL = "%sv2.0" % BASE_URL
EXTENSION_NAMESPACE = "http://docs.openstack.org/identity/api/ext/OS-FAKE/v1.0"
EXTENSION_DESCRIBED = {"href": "https://github.com/openstack/identity-api",
"rel": "describedby",
"type": "text/html"}
EXTENSION_ALIAS_FOO = "OS-FAKE-FOO"
EXTENSION_NAME_FOO = "OpenStack Keystone Fake Extension Foo"
EXTENSION_FOO = {"alias": EXTENSION_ALIAS_FOO,
"description": "Fake Foo extension to V2.0 API.",
"links": [EXTENSION_DESCRIBED],
"name": EXTENSION_NAME_FOO,
"namespace": EXTENSION_NAMESPACE,
"updated": '2014-01-08T00:00:00Z'}
EXTENSION_ALIAS_BAR = "OS-FAKE-BAR"
EXTENSION_NAME_BAR = "OpenStack Keystone Fake Extension Bar"
EXTENSION_BAR = {"alias": EXTENSION_ALIAS_BAR,
"description": "Fake Bar extension to V2.0 API.",
"links": [EXTENSION_DESCRIBED],
"name": EXTENSION_NAME_BAR,
"namespace": EXTENSION_NAMESPACE,
"updated": '2014-01-08T00:00:00Z'}
def _create_extension_list(extensions):
return jsonutils.dumps({'extensions': {'values': extensions}})
EXTENSION_LIST = _create_extension_list([EXTENSION_FOO, EXTENSION_BAR])
@httpretty.activate
class ClientDiscoveryTests(utils.TestCase):
def test_discover_extensions_v2(self):
httpretty.register_uri(httpretty.GET, "%s/extensions" % V2_URL,
body=EXTENSION_LIST)
extensions = client.Client().discover_extensions(url=V2_URL)
self.assertIn(EXTENSION_ALIAS_FOO, extensions)
self.assertEqual(extensions[EXTENSION_ALIAS_FOO], EXTENSION_NAME_FOO)
self.assertIn(EXTENSION_ALIAS_BAR, extensions)
self.assertEqual(extensions[EXTENSION_ALIAS_BAR], EXTENSION_NAME_BAR)
|
|
e0fe2901e699ed4ac8ded58fed0793c495eec02e
|
conftest.py
|
conftest.py
|
# -*- coding: utf-8 -*-
import pytest
import os
import json
@pytest.fixture(scope="class",
params=os.listdir('test_fixtures'))
def story(request):
"""
Provides a detailed HPACK story to test with.
"""
path = os.path.join('test_fixtures', request.param)
with open(path, 'r', encoding='utf-8') as f:
details = json.loads(f.read())
return details
|
Create a py.test fixture to use the new stories.
|
Create a py.test fixture to use the new stories.
|
Python
|
mit
|
plucury/hyper,masaori335/hyper,lawnmowerlatte/hyper,fredthomsen/hyper,irvind/hyper,jdecuyper/hyper,jdecuyper/hyper,masaori335/hyper,Lukasa/hyper,lawnmowerlatte/hyper,irvind/hyper,Lukasa/hyper,fredthomsen/hyper,plucury/hyper
|
Create a py.test fixture to use the new stories.
|
# -*- coding: utf-8 -*-
import pytest
import os
import json
@pytest.fixture(scope="class",
params=os.listdir('test_fixtures'))
def story(request):
"""
Provides a detailed HPACK story to test with.
"""
path = os.path.join('test_fixtures', request.param)
with open(path, 'r', encoding='utf-8') as f:
details = json.loads(f.read())
return details
|
<commit_before><commit_msg>Create a py.test fixture to use the new stories.<commit_after>
|
# -*- coding: utf-8 -*-
import pytest
import os
import json
@pytest.fixture(scope="class",
params=os.listdir('test_fixtures'))
def story(request):
"""
Provides a detailed HPACK story to test with.
"""
path = os.path.join('test_fixtures', request.param)
with open(path, 'r', encoding='utf-8') as f:
details = json.loads(f.read())
return details
|
Create a py.test fixture to use the new stories.# -*- coding: utf-8 -*-
import pytest
import os
import json
@pytest.fixture(scope="class",
params=os.listdir('test_fixtures'))
def story(request):
"""
Provides a detailed HPACK story to test with.
"""
path = os.path.join('test_fixtures', request.param)
with open(path, 'r', encoding='utf-8') as f:
details = json.loads(f.read())
return details
|
<commit_before><commit_msg>Create a py.test fixture to use the new stories.<commit_after># -*- coding: utf-8 -*-
import pytest
import os
import json
@pytest.fixture(scope="class",
params=os.listdir('test_fixtures'))
def story(request):
"""
Provides a detailed HPACK story to test with.
"""
path = os.path.join('test_fixtures', request.param)
with open(path, 'r', encoding='utf-8') as f:
details = json.loads(f.read())
return details
|
|
c694a5a5e5ba9403eeca57feb9fd21edccd95132
|
scripts/get_bank_registry_hr.py
|
scripts/get_bank_registry_hr.py
|
#!/usr/bin/env python
import json
import xlrd
import requests
URL = (
"https://www.hnb.hr/documents/20182/121798/tf-pp-ds-vbb-xlsx-e-vbb.xlsx/"
"06982c63-13e3-4aa0-846d-afb7956ee731"
)
def process():
registry = []
book = xlrd.open_workbook(file_contents=requests.get(URL).content)
sheet = book.sheet_by_index(0)
for row in list(sheet.get_rows())[4:]:
name, bank_code, bic = row[1:]
if bank_code.value != "":
registry.append(
{
"country_code": "HR",
"primary": True,
"bic": bic.value.upper().replace(" ", ""),
"bank_code": int(bank_code.value),
"name": name.value,
"short_name": name.value,
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_hr.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
Add script to generate HR bic code registry
|
Add script to generate HR bic code registry
|
Python
|
mit
|
figo-connect/schwifty
|
Add script to generate HR bic code registry
|
#!/usr/bin/env python
import json
import xlrd
import requests
URL = (
"https://www.hnb.hr/documents/20182/121798/tf-pp-ds-vbb-xlsx-e-vbb.xlsx/"
"06982c63-13e3-4aa0-846d-afb7956ee731"
)
def process():
registry = []
book = xlrd.open_workbook(file_contents=requests.get(URL).content)
sheet = book.sheet_by_index(0)
for row in list(sheet.get_rows())[4:]:
name, bank_code, bic = row[1:]
if bank_code.value != "":
registry.append(
{
"country_code": "HR",
"primary": True,
"bic": bic.value.upper().replace(" ", ""),
"bank_code": int(bank_code.value),
"name": name.value,
"short_name": name.value,
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_hr.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
<commit_before><commit_msg>Add script to generate HR bic code registry<commit_after>
|
#!/usr/bin/env python
import json
import xlrd
import requests
URL = (
"https://www.hnb.hr/documents/20182/121798/tf-pp-ds-vbb-xlsx-e-vbb.xlsx/"
"06982c63-13e3-4aa0-846d-afb7956ee731"
)
def process():
registry = []
book = xlrd.open_workbook(file_contents=requests.get(URL).content)
sheet = book.sheet_by_index(0)
for row in list(sheet.get_rows())[4:]:
name, bank_code, bic = row[1:]
if bank_code.value != "":
registry.append(
{
"country_code": "HR",
"primary": True,
"bic": bic.value.upper().replace(" ", ""),
"bank_code": int(bank_code.value),
"name": name.value,
"short_name": name.value,
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_hr.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
Add script to generate HR bic code registry#!/usr/bin/env python
import json
import xlrd
import requests
URL = (
"https://www.hnb.hr/documents/20182/121798/tf-pp-ds-vbb-xlsx-e-vbb.xlsx/"
"06982c63-13e3-4aa0-846d-afb7956ee731"
)
def process():
registry = []
book = xlrd.open_workbook(file_contents=requests.get(URL).content)
sheet = book.sheet_by_index(0)
for row in list(sheet.get_rows())[4:]:
name, bank_code, bic = row[1:]
if bank_code.value != "":
registry.append(
{
"country_code": "HR",
"primary": True,
"bic": bic.value.upper().replace(" ", ""),
"bank_code": int(bank_code.value),
"name": name.value,
"short_name": name.value,
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_hr.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
<commit_before><commit_msg>Add script to generate HR bic code registry<commit_after>#!/usr/bin/env python
import json
import xlrd
import requests
URL = (
"https://www.hnb.hr/documents/20182/121798/tf-pp-ds-vbb-xlsx-e-vbb.xlsx/"
"06982c63-13e3-4aa0-846d-afb7956ee731"
)
def process():
registry = []
book = xlrd.open_workbook(file_contents=requests.get(URL).content)
sheet = book.sheet_by_index(0)
for row in list(sheet.get_rows())[4:]:
name, bank_code, bic = row[1:]
if bank_code.value != "":
registry.append(
{
"country_code": "HR",
"primary": True,
"bic": bic.value.upper().replace(" ", ""),
"bank_code": int(bank_code.value),
"name": name.value,
"short_name": name.value,
}
)
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_hr.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
|
bf059b8b781c786edb4b3ca7ef838e9b4d16d3a0
|
tests/commands/test_lib.py
|
tests/commands/test_lib.py
|
# Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
from os import listdir
from os.path import isdir, isfile, join
import re
from click.testing import CliRunner
from platformio.commands.lib import cli
from platformio import util
runner = CliRunner()
def validate_output(result):
assert result.exit_code == 0
assert not result.exception
assert "error" not in result.output.lower()
def validate_libfolder():
libs_path = util.get_lib_dir()
installed_libs = listdir(libs_path)
for lib in installed_libs:
assert isdir(join(libs_path, lib))
assert isfile(join(libs_path, lib, ".library.json")) and isfile(
join(libs_path, lib, "library.json"))
def test_lib_search():
result = runner.invoke(cli, ["search", "DHT22"])
validate_output(result)
match = re.search(r"Found\s+(\d+)\slibraries:", result.output)
assert int(match.group(1)) > 2
result = runner.invoke(cli, ["search", "DHT22", "--platform=timsp430"])
validate_output(result)
match = re.search(r"Found\s+(\d+)\slibraries:", result.output)
assert int(match.group(1)) == 1
def test_lib_install():
result = runner.invoke(cli, ["install", "58", "115"])
validate_output(result)
validate_libfolder()
def test_lib_list():
result = runner.invoke(cli, ["list"])
validate_output(result)
assert "58" in result.output and "115" in result.output
def test_lib_show():
result = runner.invoke(cli, ["show", "115"])
validate_output(result)
assert "arduino" in result.output and "atmelavr" in result.output
result = runner.invoke(cli, ["show", "58"])
validate_output(result)
assert "energia" in result.output and "timsp430" in result.output
def test_lib_update():
result = runner.invoke(cli, ["update"])
validate_output(result)
assert "58" in result.output and "115" in result.output
def test_lib_uninstall():
result = runner.invoke(cli, ["uninstall", "58", "115"])
validate_output(result)
|
Cover "lib" commands with tests
|
Cover "lib" commands with tests
|
Python
|
apache-2.0
|
awong1900/platformio,platformio/platformio,ZachMassia/platformio,mplewis/platformio,bkudria/platformio,platformio/platformio-core,jrobeson/platformio,TimJay/platformio,jrobeson/platformio,valeros/platformio,atyenoria/platformio,jrobeson/platformio,bkudria/platformio,TimJay/platformio,mcanthony/platformio,mseroczynski/platformio,jrobeson/platformio,TimJay/platformio,dkuku/platformio,TimJay/platformio,platformio/platformio-core,bkudria/platformio,TimJay/platformio,awong1900/platformio,eiginn/platformio,bkudria/platformio,awong1900/platformio
|
Cover "lib" commands with tests
|
# Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
from os import listdir
from os.path import isdir, isfile, join
import re
from click.testing import CliRunner
from platformio.commands.lib import cli
from platformio import util
runner = CliRunner()
def validate_output(result):
assert result.exit_code == 0
assert not result.exception
assert "error" not in result.output.lower()
def validate_libfolder():
libs_path = util.get_lib_dir()
installed_libs = listdir(libs_path)
for lib in installed_libs:
assert isdir(join(libs_path, lib))
assert isfile(join(libs_path, lib, ".library.json")) and isfile(
join(libs_path, lib, "library.json"))
def test_lib_search():
result = runner.invoke(cli, ["search", "DHT22"])
validate_output(result)
match = re.search(r"Found\s+(\d+)\slibraries:", result.output)
assert int(match.group(1)) > 2
result = runner.invoke(cli, ["search", "DHT22", "--platform=timsp430"])
validate_output(result)
match = re.search(r"Found\s+(\d+)\slibraries:", result.output)
assert int(match.group(1)) == 1
def test_lib_install():
result = runner.invoke(cli, ["install", "58", "115"])
validate_output(result)
validate_libfolder()
def test_lib_list():
result = runner.invoke(cli, ["list"])
validate_output(result)
assert "58" in result.output and "115" in result.output
def test_lib_show():
result = runner.invoke(cli, ["show", "115"])
validate_output(result)
assert "arduino" in result.output and "atmelavr" in result.output
result = runner.invoke(cli, ["show", "58"])
validate_output(result)
assert "energia" in result.output and "timsp430" in result.output
def test_lib_update():
result = runner.invoke(cli, ["update"])
validate_output(result)
assert "58" in result.output and "115" in result.output
def test_lib_uninstall():
result = runner.invoke(cli, ["uninstall", "58", "115"])
validate_output(result)
|
<commit_before><commit_msg>Cover "lib" commands with tests<commit_after>
|
# Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
from os import listdir
from os.path import isdir, isfile, join
import re
from click.testing import CliRunner
from platformio.commands.lib import cli
from platformio import util
runner = CliRunner()
def validate_output(result):
assert result.exit_code == 0
assert not result.exception
assert "error" not in result.output.lower()
def validate_libfolder():
libs_path = util.get_lib_dir()
installed_libs = listdir(libs_path)
for lib in installed_libs:
assert isdir(join(libs_path, lib))
assert isfile(join(libs_path, lib, ".library.json")) and isfile(
join(libs_path, lib, "library.json"))
def test_lib_search():
result = runner.invoke(cli, ["search", "DHT22"])
validate_output(result)
match = re.search(r"Found\s+(\d+)\slibraries:", result.output)
assert int(match.group(1)) > 2
result = runner.invoke(cli, ["search", "DHT22", "--platform=timsp430"])
validate_output(result)
match = re.search(r"Found\s+(\d+)\slibraries:", result.output)
assert int(match.group(1)) == 1
def test_lib_install():
result = runner.invoke(cli, ["install", "58", "115"])
validate_output(result)
validate_libfolder()
def test_lib_list():
result = runner.invoke(cli, ["list"])
validate_output(result)
assert "58" in result.output and "115" in result.output
def test_lib_show():
result = runner.invoke(cli, ["show", "115"])
validate_output(result)
assert "arduino" in result.output and "atmelavr" in result.output
result = runner.invoke(cli, ["show", "58"])
validate_output(result)
assert "energia" in result.output and "timsp430" in result.output
def test_lib_update():
result = runner.invoke(cli, ["update"])
validate_output(result)
assert "58" in result.output and "115" in result.output
def test_lib_uninstall():
result = runner.invoke(cli, ["uninstall", "58", "115"])
validate_output(result)
|
Cover "lib" commands with tests# Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
from os import listdir
from os.path import isdir, isfile, join
import re
from click.testing import CliRunner
from platformio.commands.lib import cli
from platformio import util
runner = CliRunner()
def validate_output(result):
assert result.exit_code == 0
assert not result.exception
assert "error" not in result.output.lower()
def validate_libfolder():
libs_path = util.get_lib_dir()
installed_libs = listdir(libs_path)
for lib in installed_libs:
assert isdir(join(libs_path, lib))
assert isfile(join(libs_path, lib, ".library.json")) and isfile(
join(libs_path, lib, "library.json"))
def test_lib_search():
result = runner.invoke(cli, ["search", "DHT22"])
validate_output(result)
match = re.search(r"Found\s+(\d+)\slibraries:", result.output)
assert int(match.group(1)) > 2
result = runner.invoke(cli, ["search", "DHT22", "--platform=timsp430"])
validate_output(result)
match = re.search(r"Found\s+(\d+)\slibraries:", result.output)
assert int(match.group(1)) == 1
def test_lib_install():
result = runner.invoke(cli, ["install", "58", "115"])
validate_output(result)
validate_libfolder()
def test_lib_list():
result = runner.invoke(cli, ["list"])
validate_output(result)
assert "58" in result.output and "115" in result.output
def test_lib_show():
result = runner.invoke(cli, ["show", "115"])
validate_output(result)
assert "arduino" in result.output and "atmelavr" in result.output
result = runner.invoke(cli, ["show", "58"])
validate_output(result)
assert "energia" in result.output and "timsp430" in result.output
def test_lib_update():
result = runner.invoke(cli, ["update"])
validate_output(result)
assert "58" in result.output and "115" in result.output
def test_lib_uninstall():
result = runner.invoke(cli, ["uninstall", "58", "115"])
validate_output(result)
|
<commit_before><commit_msg>Cover "lib" commands with tests<commit_after># Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
from os import listdir
from os.path import isdir, isfile, join
import re
from click.testing import CliRunner
from platformio.commands.lib import cli
from platformio import util
runner = CliRunner()
def validate_output(result):
assert result.exit_code == 0
assert not result.exception
assert "error" not in result.output.lower()
def validate_libfolder():
libs_path = util.get_lib_dir()
installed_libs = listdir(libs_path)
for lib in installed_libs:
assert isdir(join(libs_path, lib))
assert isfile(join(libs_path, lib, ".library.json")) and isfile(
join(libs_path, lib, "library.json"))
def test_lib_search():
result = runner.invoke(cli, ["search", "DHT22"])
validate_output(result)
match = re.search(r"Found\s+(\d+)\slibraries:", result.output)
assert int(match.group(1)) > 2
result = runner.invoke(cli, ["search", "DHT22", "--platform=timsp430"])
validate_output(result)
match = re.search(r"Found\s+(\d+)\slibraries:", result.output)
assert int(match.group(1)) == 1
def test_lib_install():
result = runner.invoke(cli, ["install", "58", "115"])
validate_output(result)
validate_libfolder()
def test_lib_list():
result = runner.invoke(cli, ["list"])
validate_output(result)
assert "58" in result.output and "115" in result.output
def test_lib_show():
result = runner.invoke(cli, ["show", "115"])
validate_output(result)
assert "arduino" in result.output and "atmelavr" in result.output
result = runner.invoke(cli, ["show", "58"])
validate_output(result)
assert "energia" in result.output and "timsp430" in result.output
def test_lib_update():
result = runner.invoke(cli, ["update"])
validate_output(result)
assert "58" in result.output and "115" in result.output
def test_lib_uninstall():
result = runner.invoke(cli, ["uninstall", "58", "115"])
validate_output(result)
|
|
c9f2fcdb62856f112a17a51020c9cb215bb8620c
|
tests/test_gitlab_local.py
|
tests/test_gitlab_local.py
|
# MIT licensed
# Copyright (c) 2013-2017 lilydjwg <lilydjwg@gmail.com>, et al.
import os
import pytest
import contextlib
pytestmark = [pytest.mark.asyncio,
pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
reason="rate-limited per IP")]
@contextlib.contextmanager
def unset_gitlab_token_env():
token = os.environ.get('NVCHECKER_GITLAB_TOKEN_GITLAB_COM')
try:
if token:
del os.environ['NVCHECKER_GITLAB_TOKEN_GITLAB_COM']
yield token
finally:
if token:
os.environ['NVCHECKER_GITLAB_TOKEN_GITLAB_COM'] = token
async def test_gitlab(get_version):
with unset_gitlab_token_env():
ver = await get_version("example",
{"gitlab": "gitlab-org/gitlab-test"})
assert len(ver) == 8
assert ver.isdigit()
async def test_gitlab_max_tag(get_version):
with unset_gitlab_token_env():
assert await get_version("example", {"gitlab": "gitlab-org/gitlab-test", "use_max_tag": 1}) == "v1.1.0"
async def test_gitlab_max_tag_with_ignored_tags(get_version):
with unset_gitlab_token_env():
ver = await get_version("example",
{"gitlab": "gitlab-org/gitlab-test", "use_max_tag": 1, "ignored_tags": "v1.1.0"})
assert ver == "v1.0.0"
|
Add local test for over limit check
|
tests: Add local test for over limit check
Signed-off-by: Xuanwo <60f7302bde66b4a81d2f70acbeb96280e4013e10@gmail.com>
|
Python
|
mit
|
lilydjwg/nvchecker
|
tests: Add local test for over limit check
Signed-off-by: Xuanwo <60f7302bde66b4a81d2f70acbeb96280e4013e10@gmail.com>
|
# MIT licensed
# Copyright (c) 2013-2017 lilydjwg <lilydjwg@gmail.com>, et al.
import os
import pytest
import contextlib
pytestmark = [pytest.mark.asyncio,
pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
reason="rate-limited per IP")]
@contextlib.contextmanager
def unset_gitlab_token_env():
token = os.environ.get('NVCHECKER_GITLAB_TOKEN_GITLAB_COM')
try:
if token:
del os.environ['NVCHECKER_GITLAB_TOKEN_GITLAB_COM']
yield token
finally:
if token:
os.environ['NVCHECKER_GITLAB_TOKEN_GITLAB_COM'] = token
async def test_gitlab(get_version):
with unset_gitlab_token_env():
ver = await get_version("example",
{"gitlab": "gitlab-org/gitlab-test"})
assert len(ver) == 8
assert ver.isdigit()
async def test_gitlab_max_tag(get_version):
with unset_gitlab_token_env():
assert await get_version("example", {"gitlab": "gitlab-org/gitlab-test", "use_max_tag": 1}) == "v1.1.0"
async def test_gitlab_max_tag_with_ignored_tags(get_version):
with unset_gitlab_token_env():
ver = await get_version("example",
{"gitlab": "gitlab-org/gitlab-test", "use_max_tag": 1, "ignored_tags": "v1.1.0"})
assert ver == "v1.0.0"
|
<commit_before><commit_msg>tests: Add local test for over limit check
Signed-off-by: Xuanwo <60f7302bde66b4a81d2f70acbeb96280e4013e10@gmail.com><commit_after>
|
# MIT licensed
# Copyright (c) 2013-2017 lilydjwg <lilydjwg@gmail.com>, et al.
import os
import pytest
import contextlib
pytestmark = [pytest.mark.asyncio,
pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
reason="rate-limited per IP")]
@contextlib.contextmanager
def unset_gitlab_token_env():
token = os.environ.get('NVCHECKER_GITLAB_TOKEN_GITLAB_COM')
try:
if token:
del os.environ['NVCHECKER_GITLAB_TOKEN_GITLAB_COM']
yield token
finally:
if token:
os.environ['NVCHECKER_GITLAB_TOKEN_GITLAB_COM'] = token
async def test_gitlab(get_version):
with unset_gitlab_token_env():
ver = await get_version("example",
{"gitlab": "gitlab-org/gitlab-test"})
assert len(ver) == 8
assert ver.isdigit()
async def test_gitlab_max_tag(get_version):
with unset_gitlab_token_env():
assert await get_version("example", {"gitlab": "gitlab-org/gitlab-test", "use_max_tag": 1}) == "v1.1.0"
async def test_gitlab_max_tag_with_ignored_tags(get_version):
with unset_gitlab_token_env():
ver = await get_version("example",
{"gitlab": "gitlab-org/gitlab-test", "use_max_tag": 1, "ignored_tags": "v1.1.0"})
assert ver == "v1.0.0"
|
tests: Add local test for over limit check
Signed-off-by: Xuanwo <60f7302bde66b4a81d2f70acbeb96280e4013e10@gmail.com># MIT licensed
# Copyright (c) 2013-2017 lilydjwg <lilydjwg@gmail.com>, et al.
import os
import pytest
import contextlib
pytestmark = [pytest.mark.asyncio,
pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
reason="rate-limited per IP")]
@contextlib.contextmanager
def unset_gitlab_token_env():
token = os.environ.get('NVCHECKER_GITLAB_TOKEN_GITLAB_COM')
try:
if token:
del os.environ['NVCHECKER_GITLAB_TOKEN_GITLAB_COM']
yield token
finally:
if token:
os.environ['NVCHECKER_GITLAB_TOKEN_GITLAB_COM'] = token
async def test_gitlab(get_version):
with unset_gitlab_token_env():
ver = await get_version("example",
{"gitlab": "gitlab-org/gitlab-test"})
assert len(ver) == 8
assert ver.isdigit()
async def test_gitlab_max_tag(get_version):
with unset_gitlab_token_env():
assert await get_version("example", {"gitlab": "gitlab-org/gitlab-test", "use_max_tag": 1}) == "v1.1.0"
async def test_gitlab_max_tag_with_ignored_tags(get_version):
with unset_gitlab_token_env():
ver = await get_version("example",
{"gitlab": "gitlab-org/gitlab-test", "use_max_tag": 1, "ignored_tags": "v1.1.0"})
assert ver == "v1.0.0"
|
<commit_before><commit_msg>tests: Add local test for over limit check
Signed-off-by: Xuanwo <60f7302bde66b4a81d2f70acbeb96280e4013e10@gmail.com><commit_after># MIT licensed
# Copyright (c) 2013-2017 lilydjwg <lilydjwg@gmail.com>, et al.
import os
import pytest
import contextlib
pytestmark = [pytest.mark.asyncio,
pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
reason="rate-limited per IP")]
@contextlib.contextmanager
def unset_gitlab_token_env():
token = os.environ.get('NVCHECKER_GITLAB_TOKEN_GITLAB_COM')
try:
if token:
del os.environ['NVCHECKER_GITLAB_TOKEN_GITLAB_COM']
yield token
finally:
if token:
os.environ['NVCHECKER_GITLAB_TOKEN_GITLAB_COM'] = token
async def test_gitlab(get_version):
with unset_gitlab_token_env():
ver = await get_version("example",
{"gitlab": "gitlab-org/gitlab-test"})
assert len(ver) == 8
assert ver.isdigit()
async def test_gitlab_max_tag(get_version):
with unset_gitlab_token_env():
assert await get_version("example", {"gitlab": "gitlab-org/gitlab-test", "use_max_tag": 1}) == "v1.1.0"
async def test_gitlab_max_tag_with_ignored_tags(get_version):
with unset_gitlab_token_env():
ver = await get_version("example",
{"gitlab": "gitlab-org/gitlab-test", "use_max_tag": 1, "ignored_tags": "v1.1.0"})
assert ver == "v1.0.0"
|
|
3653ad1320d7624aa798d67031827056ddf64efd
|
pontoon/base/migrations/0095_pontoon_intro_permalink_prefix.py
|
pontoon/base/migrations/0095_pontoon_intro_permalink_prefix.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-28 23:15
from __future__ import unicode_literals
from django.db import migrations
def load_initial_data(apps, schema_editor):
Project = apps.get_model('base', 'Project')
repository = Project.objects.get(slug="pontoon-intro").repositories.first()
if not repository.permalink_prefix:
repository.permalink_prefix = 'https://raw.githubusercontent.com/mozilla/pontoon-intro/master/static/locales/{locale_code}'
repository.save()
class Migration(migrations.Migration):
dependencies = [
('base', '0094_improve_calculate_stats_performance'),
]
operations = [
migrations.RunPython(load_initial_data),
]
|
Add required field data for the initial project
|
Add required field data for the initial project
The permalink_prefix field has been added to the Repository object and
made required, but we never added a data migration to populate it for
the initial project (Pontoon Intro). So let's do this now.
|
Python
|
bsd-3-clause
|
mathjazz/pontoon,mathjazz/pontoon,jotes/pontoon,mozilla/pontoon,mastizada/pontoon,mozilla/pontoon,mastizada/pontoon,mastizada/pontoon,mozilla/pontoon,mastizada/pontoon,jotes/pontoon,mozilla/pontoon,jotes/pontoon,mathjazz/pontoon,jotes/pontoon,mozilla/pontoon,mathjazz/pontoon,mathjazz/pontoon
|
Add required field data for the initial project
The permalink_prefix field has been added to the Repository object and
made required, but we never added a data migration to populate it for
the initial project (Pontoon Intro). So let's do this now.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-28 23:15
from __future__ import unicode_literals
from django.db import migrations
def load_initial_data(apps, schema_editor):
Project = apps.get_model('base', 'Project')
repository = Project.objects.get(slug="pontoon-intro").repositories.first()
if not repository.permalink_prefix:
repository.permalink_prefix = 'https://raw.githubusercontent.com/mozilla/pontoon-intro/master/static/locales/{locale_code}'
repository.save()
class Migration(migrations.Migration):
dependencies = [
('base', '0094_improve_calculate_stats_performance'),
]
operations = [
migrations.RunPython(load_initial_data),
]
|
<commit_before><commit_msg>Add required field data for the initial project
The permalink_prefix field has been added to the Repository object and
made required, but we never added a data migration to populate it for
the initial project (Pontoon Intro). So let's do this now.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-28 23:15
from __future__ import unicode_literals
from django.db import migrations
def load_initial_data(apps, schema_editor):
Project = apps.get_model('base', 'Project')
repository = Project.objects.get(slug="pontoon-intro").repositories.first()
if not repository.permalink_prefix:
repository.permalink_prefix = 'https://raw.githubusercontent.com/mozilla/pontoon-intro/master/static/locales/{locale_code}'
repository.save()
class Migration(migrations.Migration):
dependencies = [
('base', '0094_improve_calculate_stats_performance'),
]
operations = [
migrations.RunPython(load_initial_data),
]
|
Add required field data for the initial project
The permalink_prefix field has been added to the Repository object and
made required, but we never added a data migration to populate it for
the initial project (Pontoon Intro). So let's do this now.# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-28 23:15
from __future__ import unicode_literals
from django.db import migrations
def load_initial_data(apps, schema_editor):
Project = apps.get_model('base', 'Project')
repository = Project.objects.get(slug="pontoon-intro").repositories.first()
if not repository.permalink_prefix:
repository.permalink_prefix = 'https://raw.githubusercontent.com/mozilla/pontoon-intro/master/static/locales/{locale_code}'
repository.save()
class Migration(migrations.Migration):
dependencies = [
('base', '0094_improve_calculate_stats_performance'),
]
operations = [
migrations.RunPython(load_initial_data),
]
|
<commit_before><commit_msg>Add required field data for the initial project
The permalink_prefix field has been added to the Repository object and
made required, but we never added a data migration to populate it for
the initial project (Pontoon Intro). So let's do this now.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-28 23:15
from __future__ import unicode_literals
from django.db import migrations
def load_initial_data(apps, schema_editor):
Project = apps.get_model('base', 'Project')
repository = Project.objects.get(slug="pontoon-intro").repositories.first()
if not repository.permalink_prefix:
repository.permalink_prefix = 'https://raw.githubusercontent.com/mozilla/pontoon-intro/master/static/locales/{locale_code}'
repository.save()
class Migration(migrations.Migration):
dependencies = [
('base', '0094_improve_calculate_stats_performance'),
]
operations = [
migrations.RunPython(load_initial_data),
]
|
|
c26498c9295ffca534d42d80d7d22e53c19822b2
|
byceps/typing.py
|
byceps/typing.py
|
"""
byceps.typing
~~~~~~~~~~~~~
BYCEPS-specific type aliases for PEP 484 type hints
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from uuid import UUID
UserID = UUID
BrandID = str
PartyID = str
|
Add module with BYCEPS-specific types for PEP 484 type hints
|
Add module with BYCEPS-specific types for PEP 484 type hints
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps
|
Add module with BYCEPS-specific types for PEP 484 type hints
|
"""
byceps.typing
~~~~~~~~~~~~~
BYCEPS-specific type aliases for PEP 484 type hints
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from uuid import UUID
UserID = UUID
BrandID = str
PartyID = str
|
<commit_before><commit_msg>Add module with BYCEPS-specific types for PEP 484 type hints<commit_after>
|
"""
byceps.typing
~~~~~~~~~~~~~
BYCEPS-specific type aliases for PEP 484 type hints
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from uuid import UUID
UserID = UUID
BrandID = str
PartyID = str
|
Add module with BYCEPS-specific types for PEP 484 type hints"""
byceps.typing
~~~~~~~~~~~~~
BYCEPS-specific type aliases for PEP 484 type hints
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from uuid import UUID
UserID = UUID
BrandID = str
PartyID = str
|
<commit_before><commit_msg>Add module with BYCEPS-specific types for PEP 484 type hints<commit_after>"""
byceps.typing
~~~~~~~~~~~~~
BYCEPS-specific type aliases for PEP 484 type hints
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from uuid import UUID
UserID = UUID
BrandID = str
PartyID = str
|
|
bf897cd1ee43cf970dc9b0b955db0c8c3a2dbbe4
|
src/nodeconductor_openstack/tests/test_instance.py
|
src/nodeconductor_openstack/tests/test_instance.py
|
from rest_framework import status, test
from nodeconductor.structure.models import CustomerRole
from nodeconductor.structure.tests import factories as structure_factories
from ..apps import OpenStackConfig
from . import factories
class InstanceProvisionTest(test.APITransactionTestCase):
def setUp(self):
self.customer = structure_factories.CustomerFactory()
self.settings = structure_factories.ServiceSettingsFactory(
customer=self.customer, type=OpenStackConfig.service_name)
self.service = factories.OpenStackServiceFactory(customer=self.customer, settings=self.settings)
self.image = factories.ImageFactory(settings=self.settings, min_disk=10240, min_ram=1024)
self.flavor = factories.FlavorFactory(settings=self.settings)
self.project = structure_factories.ProjectFactory(customer=self.customer)
self.link = factories.OpenStackServiceProjectLinkFactory(service=self.service, project=self.project)
self.tenant = factories.TenantFactory(service_project_link=self.link)
self.customer_owner = structure_factories.UserFactory()
self.customer.add_user(self.customer_owner, CustomerRole.OWNER)
self.client.force_authenticate(user=self.customer_owner)
self.url = factories.InstanceFactory.get_list_url()
def test_user_can_provision_instance_with_internal_ip_only(self):
self.tenant.external_network_id = ''
self.tenant.save()
response = self.client.post(self.url, self.get_valid_data(
skip_external_ip_assignment=True
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def test_user_can_provision_instance_with_automatic_external_ip(self):
response = self.client.post(self.url, self.get_valid_data(
skip_external_ip_assignment=False
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def test_user_can_provision_instance_with_manual_external_ip(self):
floating_ip = factories.FloatingIPFactory(
service_project_link=self.link, tenant=self.tenant, status='DOWN')
response = self.client.post(self.url, self.get_valid_data(
floating_ip=factories.FloatingIPFactory.get_url(floating_ip),
skip_external_ip_assignment=True
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def get_valid_data(self, **extra):
default = {
'service_project_link': factories.OpenStackServiceProjectLinkFactory.get_url(self.link),
'tenant': factories.TenantFactory.get_url(self.tenant),
'flavor': factories.FlavorFactory.get_url(self.flavor),
'image': factories.ImageFactory.get_url(self.image),
'name': 'Valid name',
'system_volume_size': self.image.min_disk
}
default.update(extra)
return default
|
Add unit tests for provisioning instance with floating IP (NC-1470)
|
Add unit tests for provisioning instance with floating IP (NC-1470)
|
Python
|
mit
|
opennode/nodeconductor-openstack
|
Add unit tests for provisioning instance with floating IP (NC-1470)
|
from rest_framework import status, test
from nodeconductor.structure.models import CustomerRole
from nodeconductor.structure.tests import factories as structure_factories
from ..apps import OpenStackConfig
from . import factories
class InstanceProvisionTest(test.APITransactionTestCase):
def setUp(self):
self.customer = structure_factories.CustomerFactory()
self.settings = structure_factories.ServiceSettingsFactory(
customer=self.customer, type=OpenStackConfig.service_name)
self.service = factories.OpenStackServiceFactory(customer=self.customer, settings=self.settings)
self.image = factories.ImageFactory(settings=self.settings, min_disk=10240, min_ram=1024)
self.flavor = factories.FlavorFactory(settings=self.settings)
self.project = structure_factories.ProjectFactory(customer=self.customer)
self.link = factories.OpenStackServiceProjectLinkFactory(service=self.service, project=self.project)
self.tenant = factories.TenantFactory(service_project_link=self.link)
self.customer_owner = structure_factories.UserFactory()
self.customer.add_user(self.customer_owner, CustomerRole.OWNER)
self.client.force_authenticate(user=self.customer_owner)
self.url = factories.InstanceFactory.get_list_url()
def test_user_can_provision_instance_with_internal_ip_only(self):
self.tenant.external_network_id = ''
self.tenant.save()
response = self.client.post(self.url, self.get_valid_data(
skip_external_ip_assignment=True
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def test_user_can_provision_instance_with_automatic_external_ip(self):
response = self.client.post(self.url, self.get_valid_data(
skip_external_ip_assignment=False
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def test_user_can_provision_instance_with_manual_external_ip(self):
floating_ip = factories.FloatingIPFactory(
service_project_link=self.link, tenant=self.tenant, status='DOWN')
response = self.client.post(self.url, self.get_valid_data(
floating_ip=factories.FloatingIPFactory.get_url(floating_ip),
skip_external_ip_assignment=True
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def get_valid_data(self, **extra):
default = {
'service_project_link': factories.OpenStackServiceProjectLinkFactory.get_url(self.link),
'tenant': factories.TenantFactory.get_url(self.tenant),
'flavor': factories.FlavorFactory.get_url(self.flavor),
'image': factories.ImageFactory.get_url(self.image),
'name': 'Valid name',
'system_volume_size': self.image.min_disk
}
default.update(extra)
return default
|
<commit_before><commit_msg>Add unit tests for provisioning instance with floating IP (NC-1470)<commit_after>
|
from rest_framework import status, test
from nodeconductor.structure.models import CustomerRole
from nodeconductor.structure.tests import factories as structure_factories
from ..apps import OpenStackConfig
from . import factories
class InstanceProvisionTest(test.APITransactionTestCase):
def setUp(self):
self.customer = structure_factories.CustomerFactory()
self.settings = structure_factories.ServiceSettingsFactory(
customer=self.customer, type=OpenStackConfig.service_name)
self.service = factories.OpenStackServiceFactory(customer=self.customer, settings=self.settings)
self.image = factories.ImageFactory(settings=self.settings, min_disk=10240, min_ram=1024)
self.flavor = factories.FlavorFactory(settings=self.settings)
self.project = structure_factories.ProjectFactory(customer=self.customer)
self.link = factories.OpenStackServiceProjectLinkFactory(service=self.service, project=self.project)
self.tenant = factories.TenantFactory(service_project_link=self.link)
self.customer_owner = structure_factories.UserFactory()
self.customer.add_user(self.customer_owner, CustomerRole.OWNER)
self.client.force_authenticate(user=self.customer_owner)
self.url = factories.InstanceFactory.get_list_url()
def test_user_can_provision_instance_with_internal_ip_only(self):
self.tenant.external_network_id = ''
self.tenant.save()
response = self.client.post(self.url, self.get_valid_data(
skip_external_ip_assignment=True
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def test_user_can_provision_instance_with_automatic_external_ip(self):
response = self.client.post(self.url, self.get_valid_data(
skip_external_ip_assignment=False
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def test_user_can_provision_instance_with_manual_external_ip(self):
floating_ip = factories.FloatingIPFactory(
service_project_link=self.link, tenant=self.tenant, status='DOWN')
response = self.client.post(self.url, self.get_valid_data(
floating_ip=factories.FloatingIPFactory.get_url(floating_ip),
skip_external_ip_assignment=True
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def get_valid_data(self, **extra):
default = {
'service_project_link': factories.OpenStackServiceProjectLinkFactory.get_url(self.link),
'tenant': factories.TenantFactory.get_url(self.tenant),
'flavor': factories.FlavorFactory.get_url(self.flavor),
'image': factories.ImageFactory.get_url(self.image),
'name': 'Valid name',
'system_volume_size': self.image.min_disk
}
default.update(extra)
return default
|
Add unit tests for provisioning instance with floating IP (NC-1470)from rest_framework import status, test
from nodeconductor.structure.models import CustomerRole
from nodeconductor.structure.tests import factories as structure_factories
from ..apps import OpenStackConfig
from . import factories
class InstanceProvisionTest(test.APITransactionTestCase):
def setUp(self):
self.customer = structure_factories.CustomerFactory()
self.settings = structure_factories.ServiceSettingsFactory(
customer=self.customer, type=OpenStackConfig.service_name)
self.service = factories.OpenStackServiceFactory(customer=self.customer, settings=self.settings)
self.image = factories.ImageFactory(settings=self.settings, min_disk=10240, min_ram=1024)
self.flavor = factories.FlavorFactory(settings=self.settings)
self.project = structure_factories.ProjectFactory(customer=self.customer)
self.link = factories.OpenStackServiceProjectLinkFactory(service=self.service, project=self.project)
self.tenant = factories.TenantFactory(service_project_link=self.link)
self.customer_owner = structure_factories.UserFactory()
self.customer.add_user(self.customer_owner, CustomerRole.OWNER)
self.client.force_authenticate(user=self.customer_owner)
self.url = factories.InstanceFactory.get_list_url()
def test_user_can_provision_instance_with_internal_ip_only(self):
self.tenant.external_network_id = ''
self.tenant.save()
response = self.client.post(self.url, self.get_valid_data(
skip_external_ip_assignment=True
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def test_user_can_provision_instance_with_automatic_external_ip(self):
response = self.client.post(self.url, self.get_valid_data(
skip_external_ip_assignment=False
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def test_user_can_provision_instance_with_manual_external_ip(self):
floating_ip = factories.FloatingIPFactory(
service_project_link=self.link, tenant=self.tenant, status='DOWN')
response = self.client.post(self.url, self.get_valid_data(
floating_ip=factories.FloatingIPFactory.get_url(floating_ip),
skip_external_ip_assignment=True
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def get_valid_data(self, **extra):
default = {
'service_project_link': factories.OpenStackServiceProjectLinkFactory.get_url(self.link),
'tenant': factories.TenantFactory.get_url(self.tenant),
'flavor': factories.FlavorFactory.get_url(self.flavor),
'image': factories.ImageFactory.get_url(self.image),
'name': 'Valid name',
'system_volume_size': self.image.min_disk
}
default.update(extra)
return default
|
<commit_before><commit_msg>Add unit tests for provisioning instance with floating IP (NC-1470)<commit_after>from rest_framework import status, test
from nodeconductor.structure.models import CustomerRole
from nodeconductor.structure.tests import factories as structure_factories
from ..apps import OpenStackConfig
from . import factories
class InstanceProvisionTest(test.APITransactionTestCase):
def setUp(self):
self.customer = structure_factories.CustomerFactory()
self.settings = structure_factories.ServiceSettingsFactory(
customer=self.customer, type=OpenStackConfig.service_name)
self.service = factories.OpenStackServiceFactory(customer=self.customer, settings=self.settings)
self.image = factories.ImageFactory(settings=self.settings, min_disk=10240, min_ram=1024)
self.flavor = factories.FlavorFactory(settings=self.settings)
self.project = structure_factories.ProjectFactory(customer=self.customer)
self.link = factories.OpenStackServiceProjectLinkFactory(service=self.service, project=self.project)
self.tenant = factories.TenantFactory(service_project_link=self.link)
self.customer_owner = structure_factories.UserFactory()
self.customer.add_user(self.customer_owner, CustomerRole.OWNER)
self.client.force_authenticate(user=self.customer_owner)
self.url = factories.InstanceFactory.get_list_url()
def test_user_can_provision_instance_with_internal_ip_only(self):
self.tenant.external_network_id = ''
self.tenant.save()
response = self.client.post(self.url, self.get_valid_data(
skip_external_ip_assignment=True
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def test_user_can_provision_instance_with_automatic_external_ip(self):
response = self.client.post(self.url, self.get_valid_data(
skip_external_ip_assignment=False
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def test_user_can_provision_instance_with_manual_external_ip(self):
floating_ip = factories.FloatingIPFactory(
service_project_link=self.link, tenant=self.tenant, status='DOWN')
response = self.client.post(self.url, self.get_valid_data(
floating_ip=factories.FloatingIPFactory.get_url(floating_ip),
skip_external_ip_assignment=True
))
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def get_valid_data(self, **extra):
default = {
'service_project_link': factories.OpenStackServiceProjectLinkFactory.get_url(self.link),
'tenant': factories.TenantFactory.get_url(self.tenant),
'flavor': factories.FlavorFactory.get_url(self.flavor),
'image': factories.ImageFactory.get_url(self.image),
'name': 'Valid name',
'system_volume_size': self.image.min_disk
}
default.update(extra)
return default
|
|
3bf6d6e13f7611b127582ab862d46228b2164a95
|
st2common/bin/migrations/v2.1/st2-migrate-runners.py
|
st2common/bin/migrations/v2.1/st2-migrate-runners.py
|
#!/usr/bin/env python
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from eventlet.green import subprocess
from st2common.util.green.shell import run_command
def main():
timeout = 180
args = [
'st2ctl',
'reload',
'--register-all',
'--register-fail-on-failure'
]
exit_code, stdout, stderr, timed_out = run_command(cmd=args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=False,
timeout=timeout)
if timed_out:
print('Timed out migrating runners!')
sys.exit(1)
if exit_code == 0:
print('Migrated runners successfully! Run `st2 runner list` to see available runners.')
else:
print('Error migrating runners! ' + \
'exit code: %d\n\nstderr: %s\n\nstdout: %s\n\n' % (exit_code, stderr, stdout))
sys.exit(exit_code)
if __name__ == '__main__':
main()
|
Add migration script to register runners
|
Add migration script to register runners
|
Python
|
apache-2.0
|
peak6/st2,nzlosh/st2,Plexxi/st2,peak6/st2,tonybaloney/st2,StackStorm/st2,Plexxi/st2,tonybaloney/st2,lakshmi-kannan/st2,pixelrebel/st2,StackStorm/st2,nzlosh/st2,StackStorm/st2,lakshmi-kannan/st2,tonybaloney/st2,Plexxi/st2,pixelrebel/st2,pixelrebel/st2,nzlosh/st2,lakshmi-kannan/st2,nzlosh/st2,StackStorm/st2,Plexxi/st2,peak6/st2
|
Add migration script to register runners
|
#!/usr/bin/env python
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from eventlet.green import subprocess
from st2common.util.green.shell import run_command
def main():
timeout = 180
args = [
'st2ctl',
'reload',
'--register-all',
'--register-fail-on-failure'
]
exit_code, stdout, stderr, timed_out = run_command(cmd=args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=False,
timeout=timeout)
if timed_out:
print('Timed out migrating runners!')
sys.exit(1)
if exit_code == 0:
print('Migrated runners successfully! Run `st2 runner list` to see available runners.')
else:
print('Error migrating runners! ' + \
'exit code: %d\n\nstderr: %s\n\nstdout: %s\n\n' % (exit_code, stderr, stdout))
sys.exit(exit_code)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add migration script to register runners<commit_after>
|
#!/usr/bin/env python
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from eventlet.green import subprocess
from st2common.util.green.shell import run_command
def main():
timeout = 180
args = [
'st2ctl',
'reload',
'--register-all',
'--register-fail-on-failure'
]
exit_code, stdout, stderr, timed_out = run_command(cmd=args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=False,
timeout=timeout)
if timed_out:
print('Timed out migrating runners!')
sys.exit(1)
if exit_code == 0:
print('Migrated runners successfully! Run `st2 runner list` to see available runners.')
else:
print('Error migrating runners! ' + \
'exit code: %d\n\nstderr: %s\n\nstdout: %s\n\n' % (exit_code, stderr, stdout))
sys.exit(exit_code)
if __name__ == '__main__':
main()
|
Add migration script to register runners#!/usr/bin/env python
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from eventlet.green import subprocess
from st2common.util.green.shell import run_command
def main():
timeout = 180
args = [
'st2ctl',
'reload',
'--register-all',
'--register-fail-on-failure'
]
exit_code, stdout, stderr, timed_out = run_command(cmd=args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=False,
timeout=timeout)
if timed_out:
print('Timed out migrating runners!')
sys.exit(1)
if exit_code == 0:
print('Migrated runners successfully! Run `st2 runner list` to see available runners.')
else:
print('Error migrating runners! ' + \
'exit code: %d\n\nstderr: %s\n\nstdout: %s\n\n' % (exit_code, stderr, stdout))
sys.exit(exit_code)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add migration script to register runners<commit_after>#!/usr/bin/env python
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from eventlet.green import subprocess
from st2common.util.green.shell import run_command
def main():
timeout = 180
args = [
'st2ctl',
'reload',
'--register-all',
'--register-fail-on-failure'
]
exit_code, stdout, stderr, timed_out = run_command(cmd=args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=False,
timeout=timeout)
if timed_out:
print('Timed out migrating runners!')
sys.exit(1)
if exit_code == 0:
print('Migrated runners successfully! Run `st2 runner list` to see available runners.')
else:
print('Error migrating runners! ' + \
'exit code: %d\n\nstderr: %s\n\nstdout: %s\n\n' % (exit_code, stderr, stdout))
sys.exit(exit_code)
if __name__ == '__main__':
main()
|
|
5d741bb4a0f0cdab21a23951507e42b257acc2a6
|
examples/timer_spinner.py
|
examples/timer_spinner.py
|
# -*- coding: utf-8 -*-
"""
examples.timer_spinner
~~~~~~~~~~~~~~~~~~~~~~
Show elapsed time at the end of the line.
"""
import time
from yaspin import yaspin
def main():
with yaspin(text="elapsed time", timer=True) as sp:
# Floats are rounded into two decimal digits in timer output
time.sleep(3.1415)
sp.ok()
if __name__ == "__main__":
main()
|
Add usage example for timer feature
|
Add usage example for timer feature
|
Python
|
mit
|
pavdmyt/yaspin
|
Add usage example for timer feature
|
# -*- coding: utf-8 -*-
"""
examples.timer_spinner
~~~~~~~~~~~~~~~~~~~~~~
Show elapsed time at the end of the line.
"""
import time
from yaspin import yaspin
def main():
with yaspin(text="elapsed time", timer=True) as sp:
# Floats are rounded into two decimal digits in timer output
time.sleep(3.1415)
sp.ok()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add usage example for timer feature<commit_after>
|
# -*- coding: utf-8 -*-
"""
examples.timer_spinner
~~~~~~~~~~~~~~~~~~~~~~
Show elapsed time at the end of the line.
"""
import time
from yaspin import yaspin
def main():
with yaspin(text="elapsed time", timer=True) as sp:
# Floats are rounded into two decimal digits in timer output
time.sleep(3.1415)
sp.ok()
if __name__ == "__main__":
main()
|
Add usage example for timer feature# -*- coding: utf-8 -*-
"""
examples.timer_spinner
~~~~~~~~~~~~~~~~~~~~~~
Show elapsed time at the end of the line.
"""
import time
from yaspin import yaspin
def main():
with yaspin(text="elapsed time", timer=True) as sp:
# Floats are rounded into two decimal digits in timer output
time.sleep(3.1415)
sp.ok()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add usage example for timer feature<commit_after># -*- coding: utf-8 -*-
"""
examples.timer_spinner
~~~~~~~~~~~~~~~~~~~~~~
Show elapsed time at the end of the line.
"""
import time
from yaspin import yaspin
def main():
with yaspin(text="elapsed time", timer=True) as sp:
# Floats are rounded into two decimal digits in timer output
time.sleep(3.1415)
sp.ok()
if __name__ == "__main__":
main()
|
|
4223ed495639650a1b2ffdbc76546c8cbb7cc90f
|
moksha/tests/quickstarts/test_consumer.py
|
moksha/tests/quickstarts/test_consumer.py
|
import inspect
import pkg_resources
from datetime import timedelta
from moksha.api.hub import Consumer
from moksha.pastetemplate import MokshaConsumerTemplate
from base import QuickstartTester
class TestConsumerQuickstart(QuickstartTester):
def __init__(self,**options):
self.app = None
self.template_vars = {
'package': 'mokshatest',
'project': 'mokshatest',
'egg': 'mokshatest',
'egg_plugins': ['Moksha'],
'topic': 'moksha.topics.test',
}
self.args = {
'consumer': True,
'consumer_name': 'MokshatestConsumer',
}
self.template = MokshaConsumerTemplate
self.templates = ['moksha.consumer']
def get_entry(self):
for consumer in pkg_resources.working_set.iter_entry_points('moksha.consumer'):
consumer_class = consumer.load()
if inspect.isclass(consumer_class):
name = consumer_class.__name__
else:
name = consumer_class.__class__.__name__
if name == 'MokshatestConsumer':
return consumer_class
def test_entry_point(self):
assert self.get_entry(), \
"Cannot find MokshatestConsumer on `moksha.consumer` entry-point"
def test_polling_dataconsumer(self):
consumer = self.get_entry()
assert isinstance(consumer, Consumer) or \
issubclass(consumer, Consumer)
def test_consumer_topic(self):
""" Ensure the Consumer has a topic """
consumer = self.get_entry()
assert hasattr(consumer, 'topic')
def test_consumer_consume(self):
""" Ensure our Consumer has a `consume` method """
consumer = self.get_entry()
assert hasattr(consumer, 'consume')
|
Add some tests for our consumer quickstart
|
Add some tests for our consumer quickstart
|
Python
|
apache-2.0
|
pombredanne/moksha,ralphbean/moksha,lmacken/moksha,mokshaproject/moksha,mokshaproject/moksha,ralphbean/moksha,lmacken/moksha,pombredanne/moksha,lmacken/moksha,ralphbean/moksha,mokshaproject/moksha,pombredanne/moksha,mokshaproject/moksha,pombredanne/moksha
|
Add some tests for our consumer quickstart
|
import inspect
import pkg_resources
from datetime import timedelta
from moksha.api.hub import Consumer
from moksha.pastetemplate import MokshaConsumerTemplate
from base import QuickstartTester
class TestConsumerQuickstart(QuickstartTester):
def __init__(self,**options):
self.app = None
self.template_vars = {
'package': 'mokshatest',
'project': 'mokshatest',
'egg': 'mokshatest',
'egg_plugins': ['Moksha'],
'topic': 'moksha.topics.test',
}
self.args = {
'consumer': True,
'consumer_name': 'MokshatestConsumer',
}
self.template = MokshaConsumerTemplate
self.templates = ['moksha.consumer']
def get_entry(self):
for consumer in pkg_resources.working_set.iter_entry_points('moksha.consumer'):
consumer_class = consumer.load()
if inspect.isclass(consumer_class):
name = consumer_class.__name__
else:
name = consumer_class.__class__.__name__
if name == 'MokshatestConsumer':
return consumer_class
def test_entry_point(self):
assert self.get_entry(), \
"Cannot find MokshatestConsumer on `moksha.consumer` entry-point"
def test_polling_dataconsumer(self):
consumer = self.get_entry()
assert isinstance(consumer, Consumer) or \
issubclass(consumer, Consumer)
def test_consumer_topic(self):
""" Ensure the Consumer has a topic """
consumer = self.get_entry()
assert hasattr(consumer, 'topic')
def test_consumer_consume(self):
""" Ensure our Consumer has a `consume` method """
consumer = self.get_entry()
assert hasattr(consumer, 'consume')
|
<commit_before><commit_msg>Add some tests for our consumer quickstart<commit_after>
|
import inspect
import pkg_resources
from datetime import timedelta
from moksha.api.hub import Consumer
from moksha.pastetemplate import MokshaConsumerTemplate
from base import QuickstartTester
class TestConsumerQuickstart(QuickstartTester):
def __init__(self,**options):
self.app = None
self.template_vars = {
'package': 'mokshatest',
'project': 'mokshatest',
'egg': 'mokshatest',
'egg_plugins': ['Moksha'],
'topic': 'moksha.topics.test',
}
self.args = {
'consumer': True,
'consumer_name': 'MokshatestConsumer',
}
self.template = MokshaConsumerTemplate
self.templates = ['moksha.consumer']
def get_entry(self):
for consumer in pkg_resources.working_set.iter_entry_points('moksha.consumer'):
consumer_class = consumer.load()
if inspect.isclass(consumer_class):
name = consumer_class.__name__
else:
name = consumer_class.__class__.__name__
if name == 'MokshatestConsumer':
return consumer_class
def test_entry_point(self):
assert self.get_entry(), \
"Cannot find MokshatestConsumer on `moksha.consumer` entry-point"
def test_polling_dataconsumer(self):
consumer = self.get_entry()
assert isinstance(consumer, Consumer) or \
issubclass(consumer, Consumer)
def test_consumer_topic(self):
""" Ensure the Consumer has a topic """
consumer = self.get_entry()
assert hasattr(consumer, 'topic')
def test_consumer_consume(self):
""" Ensure our Consumer has a `consume` method """
consumer = self.get_entry()
assert hasattr(consumer, 'consume')
|
Add some tests for our consumer quickstartimport inspect
import pkg_resources
from datetime import timedelta
from moksha.api.hub import Consumer
from moksha.pastetemplate import MokshaConsumerTemplate
from base import QuickstartTester
class TestConsumerQuickstart(QuickstartTester):
def __init__(self,**options):
self.app = None
self.template_vars = {
'package': 'mokshatest',
'project': 'mokshatest',
'egg': 'mokshatest',
'egg_plugins': ['Moksha'],
'topic': 'moksha.topics.test',
}
self.args = {
'consumer': True,
'consumer_name': 'MokshatestConsumer',
}
self.template = MokshaConsumerTemplate
self.templates = ['moksha.consumer']
def get_entry(self):
for consumer in pkg_resources.working_set.iter_entry_points('moksha.consumer'):
consumer_class = consumer.load()
if inspect.isclass(consumer_class):
name = consumer_class.__name__
else:
name = consumer_class.__class__.__name__
if name == 'MokshatestConsumer':
return consumer_class
def test_entry_point(self):
assert self.get_entry(), \
"Cannot find MokshatestConsumer on `moksha.consumer` entry-point"
def test_polling_dataconsumer(self):
consumer = self.get_entry()
assert isinstance(consumer, Consumer) or \
issubclass(consumer, Consumer)
def test_consumer_topic(self):
""" Ensure the Consumer has a topic """
consumer = self.get_entry()
assert hasattr(consumer, 'topic')
def test_consumer_consume(self):
""" Ensure our Consumer has a `consume` method """
consumer = self.get_entry()
assert hasattr(consumer, 'consume')
|
<commit_before><commit_msg>Add some tests for our consumer quickstart<commit_after>import inspect
import pkg_resources
from datetime import timedelta
from moksha.api.hub import Consumer
from moksha.pastetemplate import MokshaConsumerTemplate
from base import QuickstartTester
class TestConsumerQuickstart(QuickstartTester):
def __init__(self,**options):
self.app = None
self.template_vars = {
'package': 'mokshatest',
'project': 'mokshatest',
'egg': 'mokshatest',
'egg_plugins': ['Moksha'],
'topic': 'moksha.topics.test',
}
self.args = {
'consumer': True,
'consumer_name': 'MokshatestConsumer',
}
self.template = MokshaConsumerTemplate
self.templates = ['moksha.consumer']
def get_entry(self):
for consumer in pkg_resources.working_set.iter_entry_points('moksha.consumer'):
consumer_class = consumer.load()
if inspect.isclass(consumer_class):
name = consumer_class.__name__
else:
name = consumer_class.__class__.__name__
if name == 'MokshatestConsumer':
return consumer_class
def test_entry_point(self):
assert self.get_entry(), \
"Cannot find MokshatestConsumer on `moksha.consumer` entry-point"
def test_polling_dataconsumer(self):
consumer = self.get_entry()
assert isinstance(consumer, Consumer) or \
issubclass(consumer, Consumer)
def test_consumer_topic(self):
""" Ensure the Consumer has a topic """
consumer = self.get_entry()
assert hasattr(consumer, 'topic')
def test_consumer_consume(self):
""" Ensure our Consumer has a `consume` method """
consumer = self.get_entry()
assert hasattr(consumer, 'consume')
|
|
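Aside: a minimal sketch of the kind of consumer these quickstart tests expect. Only the Consumer base class, the topic attribute, and the consume method come from the tests; the class name and topic string mirror the template_vars above, and the body is an assumption, not the actual quickstart output. The class would also need to be advertised under the moksha.consumer entry point in the generated project's setup.py for get_entry() to find it.

from moksha.api.hub import Consumer

class MokshatestConsumer(Consumer):
    # Topic mirrors the 'topic' value in template_vars above (an assumption).
    topic = 'moksha.topics.test'

    def consume(self, message):
        # A real consumer would act on the message body here.
        print(message)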
6f74d7d1b95eb2b4b2b07e48fab7d935069a9c7c
|
generate-upload-stream.py
|
generate-upload-stream.py
|
#!/usr/bin/env python3
import fileinput
import hashlib
import itertools
import random
import re
import sys
import timer
import utils
"""
Generates an upload request stream from the data collected by file_counts.py.
The input is read from stdin and the compact, binary output is written to
stdout.
Usage:
cat <FILE_POPULARITY_DATA_FILE> | ./generate-upload-stream > <OUTPUT_FILE>
"""
# 20 bytes for the SHA1 hash, 5 bytes for the file size.
BYTES_PER_UPLOAD = 25
# The number of steps between printing reports during long lasting computation.
REPORT_FREQUENCY = 1000000
@utils.timeit
def read_input():
with fileinput.input() as lines:
uploads = []
i = 0
tmr = timer.Timer()
for line in lines:
hsh, count, size = line.split(" ")
if "-" in size or "-" in count:
continue
uploads += [int(hsh, 16) | int(size) << 160] * int(count)
i += 1
if i % REPORT_FREQUENCY == 0:
print("%s files, %s uploads, time %s, %s" % (
utils.num_fmt(i),
utils.num_fmt(len(uploads)),
tmr.elapsed_str,
utils.get_mem_info(uploads)), file=sys.stderr)
tmr.reset()
return uploads
@utils.timeit
def generate_uploads(uploads):
i = 0
digest = hashlib.sha256()
tmr = timer.Timer()
for upload in utils.shuffle(uploads):
i += 1
if i % REPORT_FREQUENCY == 0:
print("%s uploads, time %s, %s" % (
utils.num_fmt(i),
tmr.elapsed_str,
utils.get_mem_info()
), file=sys.stderr)
tmr.reset()
try:
encoded = upload.to_bytes(BYTES_PER_UPLOAD, byteorder='big')
except OverflowError as e:
print("ERROR: size | hash does not fit into %i bytes" % BYTES_PER_UPLOAD, file=sys.stderr)
raise e
digest.update(encoded)
sys.stdout.buffer.write(encoded)
print("Finished. SHA256 digest of the data: %s" % digest.hexdigest(), file=sys.stderr)
@utils.timeit
def main():
uploads = read_input()
generate_uploads(uploads)
if __name__ == "__main__":
sys.exit(main())
|
Add script for generating upload request stream
|
Add script for generating upload request stream
|
Python
|
apache-2.0
|
sjakthol/dedup-simulator,sjakthol/dedup-simulator
|
Add script for generating upload request stream
|
#!/usr/bin/env python3
import fileinput
import hashlib
import itertools
import random
import re
import sys
import timer
import utils
"""
Generates an upload request stream from the data collected by file_counts.py.
The input is read from stdin and the compact, binary output is written to
stdout.
Usage:
cat <FILE_POPULARITY_DATA_FILE> | ./generate-upload-stream > <OUTPUT_FILE>
"""
# 20 bytes for the SHA1 hash, 5 bytes for the file size.
BYTES_PER_UPLOAD = 25
# The number of steps between printing reports during long lasting computation.
REPORT_FREQUENCY = 1000000
@utils.timeit
def read_input():
with fileinput.input() as lines:
uploads = []
i = 0
tmr = timer.Timer()
for line in lines:
hsh, count, size = line.split(" ")
if "-" in size or "-" in count:
continue
uploads += [int(hsh, 16) | int(size) << 160] * int(count)
i += 1
if i % REPORT_FREQUENCY == 0:
print("%s files, %s uploads, time %s, %s" % (
utils.num_fmt(i),
utils.num_fmt(len(uploads)),
tmr.elapsed_str,
utils.get_mem_info(uploads)), file=sys.stderr)
tmr.reset()
return uploads
@utils.timeit
def generate_uploads(uploads):
i = 0
digest = hashlib.sha256()
tmr = timer.Timer()
for upload in utils.shuffle(uploads):
i += 1
if i % REPORT_FREQUENCY == 0:
print("%s uploads, time %s, %s" % (
utils.num_fmt(i),
tmr.elapsed_str,
utils.get_mem_info()
), file=sys.stderr)
tmr.reset()
try:
encoded = upload.to_bytes(BYTES_PER_UPLOAD, byteorder='big')
except OverflowError as e:
print("ERROR: size | hash does not fit into %i bytes" % BYTES_PER_UPLOAD, file=sys.stderr)
raise e
digest.update(encoded)
sys.stdout.buffer.write(encoded)
print("Finished. SHA256 digest of the data: %s" % digest.hexdigest(), file=sys.stderr)
@utils.timeit
def main():
uploads = read_input()
generate_uploads(uploads)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add script for generating upload request stream<commit_after>
|
#!/usr/bin/env python3
import fileinput
import hashlib
import itertools
import random
import re
import sys
import timer
import utils
"""
Generates an upload request stream from the data collected by file_counts.py.
The input is read from stdin and the compact, binary output is written to
stdout.
Usage:
cat <FILE_POPULARITY_DATA_FILE> | ./generate-upload-stream > <OUTPUT_FILE>
"""
# 20 bytes for the SHA1 hash, 5 bytes for the file size.
BYTES_PER_UPLOAD = 25
# The number of steps between printing reports during long lasting computation.
REPORT_FREQUENCY = 1000000
@utils.timeit
def read_input():
with fileinput.input() as lines:
uploads = []
i = 0
tmr = timer.Timer()
for line in lines:
hsh, count, size = line.split(" ")
if "-" in size or "-" in count:
continue
uploads += [int(hsh, 16) | int(size) << 160] * int(count)
i += 1
if i % REPORT_FREQUENCY == 0:
print("%s files, %s uploads, time %s, %s" % (
utils.num_fmt(i),
utils.num_fmt(len(uploads)),
tmr.elapsed_str,
utils.get_mem_info(uploads)), file=sys.stderr)
tmr.reset()
return uploads
@utils.timeit
def generate_uploads(uploads):
i = 0
digest = hashlib.sha256()
tmr = timer.Timer()
for upload in utils.shuffle(uploads):
i += 1
if i % REPORT_FREQUENCY == 0:
print("%s uploads, time %s, %s" % (
utils.num_fmt(i),
tmr.elapsed_str,
utils.get_mem_info()
), file=sys.stderr)
tmr.reset()
try:
encoded = upload.to_bytes(BYTES_PER_UPLOAD, byteorder='big')
except OverflowError as e:
print("ERROR: size | hash does not fit into %i bytes" % BYTES_PER_UPLOAD, file=sys.stderr)
raise e
digest.update(encoded)
sys.stdout.buffer.write(encoded)
print("Finished. SHA256 digest of the data: %s" % digest.hexdigest(), file=sys.stderr)
@utils.timeit
def main():
uploads = read_input()
generate_uploads(uploads)
if __name__ == "__main__":
sys.exit(main())
|
Add script for generating upload request stream#!/usr/bin/env python3
import fileinput
import hashlib
import itertools
import random
import re
import sys
import timer
import utils
"""
Generates an upload request stream from the data collected by file_counts.py.
The input is read from stdin and the compact, binary output is written to
stdout.
Usage:
cat <FILE_POPULARITY_DATA_FILE> | ./generate-upload-stream > <OUTPUT_FILE>
"""
# 20 bytes for the SHA1 hash, 5 bytes for the file size.
BYTES_PER_UPLOAD = 25
# The number of steps between printing reports during long lasting computation.
REPORT_FREQUENCY = 1000000
@utils.timeit
def read_input():
with fileinput.input() as lines:
uploads = []
i = 0
tmr = timer.Timer()
for line in lines:
hsh, count, size = line.split(" ")
if "-" in size or "-" in count:
continue
uploads += [int(hsh, 16) | int(size) << 160] * int(count)
i += 1
if i % REPORT_FREQUENCY == 0:
print("%s files, %s uploads, time %s, %s" % (
utils.num_fmt(i),
utils.num_fmt(len(uploads)),
tmr.elapsed_str,
utils.get_mem_info(uploads)), file=sys.stderr)
tmr.reset()
return uploads
@utils.timeit
def generate_uploads(uploads):
i = 0
digest = hashlib.sha256()
tmr = timer.Timer()
for upload in utils.shuffle(uploads):
i += 1
if i % REPORT_FREQUENCY == 0:
print("%s uploads, time %s, %s" % (
utils.num_fmt(i),
tmr.elapsed_str,
utils.get_mem_info()
), file=sys.stderr)
tmr.reset()
try:
encoded = upload.to_bytes(BYTES_PER_UPLOAD, byteorder='big')
except OverflowError as e:
print("ERROR: size | hash does not fit into %i bytes" % BYTES_PER_UPLOAD, file=sys.stderr)
raise e
digest.update(encoded)
sys.stdout.buffer.write(encoded)
print("Finished. SHA256 digest of the data: %s" % digest.hexdigest(), file=sys.stderr)
@utils.timeit
def main():
uploads = read_input()
generate_uploads(uploads)
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add script for generating upload request stream<commit_after>#!/usr/bin/env python3
import fileinput
import hashlib
import itertools
import random
import re
import sys
import timer
import utils
"""
Generates an upload request stream from the data collected by file_counts.py.
The input is read from stdin and the compact, binary output is written to
stdout.
Usage:
cat <FILE_POPULARITY_DATA_FILE> | ./generate-upload-stream > <OUTPUT_FILE>
"""
# 20 bytes for the SHA1 hash, 5 bytes for the file size.
BYTES_PER_UPLOAD = 25
# The number of steps between printing reports during long lasting computation.
REPORT_FREQUENCY = 1000000
@utils.timeit
def read_input():
with fileinput.input() as lines:
uploads = []
i = 0
tmr = timer.Timer()
for line in lines:
hsh, count, size = line.split(" ")
if "-" in size or "-" in count:
continue
uploads += [int(hsh, 16) | int(size) << 160] * int(count)
i += 1
if i % REPORT_FREQUENCY == 0:
print("%s files, %s uploads, time %s, %s" % (
utils.num_fmt(i),
utils.num_fmt(len(uploads)),
tmr.elapsed_str,
utils.get_mem_info(uploads)), file=sys.stderr)
tmr.reset()
return uploads
@utils.timeit
def generate_uploads(uploads):
i = 0
digest = hashlib.sha256()
tmr = timer.Timer()
for upload in utils.shuffle(uploads):
i += 1
if i % REPORT_FREQUENCY == 0:
print("%s uploads, time %s, %s" % (
utils.num_fmt(i),
tmr.elapsed_str,
utils.get_mem_info()
), file=sys.stderr)
tmr.reset()
try:
encoded = upload.to_bytes(BYTES_PER_UPLOAD, byteorder='big')
except OverflowError as e:
print("ERROR: size | hash does not fit into %i bytes" % BYTES_PER_UPLOAD, file=sys.stderr)
raise e
digest.update(encoded)
sys.stdout.buffer.write(encoded)
print("Finished. SHA256 digest of the data: %s" % digest.hexdigest(), file=sys.stderr)
@utils.timeit
def main():
uploads = read_input()
generate_uploads(uploads)
if __name__ == "__main__":
sys.exit(main())
|
|
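Aside: a sketch of reading the packed stream back, assuming only the format defined above (25-byte big-endian records with the file size shifted 160 bits above the SHA1 hash). This reader is illustrative and not part of the repository.

import sys

RECORD_SIZE = 25  # matches BYTES_PER_UPLOAD in the generator above

def read_uploads(stream):
    # Yield (sha1_hex, size) tuples until the stream is exhausted.
    while True:
        chunk = stream.read(RECORD_SIZE)
        if len(chunk) < RECORD_SIZE:
            break
        value = int.from_bytes(chunk, byteorder='big')
        size = value >> 160              # undo the `size << 160` packing
        sha1 = value & ((1 << 160) - 1)  # low 160 bits hold the hash
        yield format(sha1, '040x'), size

if __name__ == '__main__':
    for sha1, size in read_uploads(sys.stdin.buffer):
        print(sha1, size)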
27cc2a5fb13a9a45fd8af3b8bee1d0c66b7c8d91
|
cardinal/test_decorators.py
|
cardinal/test_decorators.py
|
import pytest
import decorators
def test_command():
# ensure commands is a list with foo added
@decorators.command('foo')
def foo():
pass
assert foo.commands == ['foo']
# test that you can pass a list
@decorators.command(['foo', 'bar'])
def foo():
pass
assert foo.commands == ['foo', 'bar']
# test that only one decorator can add commands
@decorators.command('foo')
@decorators.command('bar')
def foo():
pass
assert foo.commands == ['foo']
# only allow strings and lists
with pytest.raises(TypeError):
@decorators.command(True)
def foo():
pass
with pytest.raises(TypeError):
@decorators.command(5)
def foo():
pass
with pytest.raises(TypeError):
@decorators.command(('foo',))
def foo():
pass
def test_help():
# ensure help is a list with the line added
@decorators.help("This is a help line")
def foo():
pass
assert foo.help == ["This is a help line"]
# test the order of the help lines
@decorators.help("This is the first help line")
@decorators.help("This is the second help line")
def foo():
pass
assert foo.help == [
"This is the first help line",
"This is the second help line",
]
# only allow strings
with pytest.raises(TypeError):
@decorators.help(["This should raise an exception"])
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(5)
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(True)
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(('foo',))
def foo():
pass
|
Add first unit test using py.test
|
Add first unit test using py.test
Tests Cardinal's decorators
|
Python
|
mit
|
BiohZn/Cardinal,JohnMaguire/Cardinal
|
Add first unit test using py.test
Tests Cardinal's decorators
|
import pytest
import decorators
def test_command():
# ensure commands is a list with foo added
@decorators.command('foo')
def foo():
pass
assert foo.commands == ['foo']
# test that you can pass a list
@decorators.command(['foo', 'bar'])
def foo():
pass
assert foo.commands == ['foo', 'bar']
# test that only one decorator can add commands
@decorators.command('foo')
@decorators.command('bar')
def foo():
pass
assert foo.commands == ['foo']
# only allow strings and lists
with pytest.raises(TypeError):
@decorators.command(True)
def foo():
pass
with pytest.raises(TypeError):
@decorators.command(5)
def foo():
pass
with pytest.raises(TypeError):
@decorators.command(('foo',))
def foo():
pass
def test_help():
# ensure help is a list with the line added
@decorators.help("This is a help line")
def foo():
pass
assert foo.help == ["This is a help line"]
# test the order of the help lines
@decorators.help("This is the first help line")
@decorators.help("This is the second help line")
def foo():
pass
assert foo.help == [
"This is the first help line",
"This is the second help line",
]
# only allow strings
with pytest.raises(TypeError):
@decorators.help(["This should raise an exception"])
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(5)
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(True)
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(('foo',))
def foo():
pass
|
<commit_before><commit_msg>Add first unit test using py.test
Tests Cardinal's decorators<commit_after>
|
import pytest
import decorators
def test_command():
# ensure commands is a list with foo added
@decorators.command('foo')
def foo():
pass
assert foo.commands == ['foo']
# test that you can pass a list
@decorators.command(['foo', 'bar'])
def foo():
pass
assert foo.commands == ['foo', 'bar']
# test that only one decorator can add commands
@decorators.command('foo')
@decorators.command('bar')
def foo():
pass
assert foo.commands == ['foo']
# only allow strings and lists
with pytest.raises(TypeError):
@decorators.command(True)
def foo():
pass
with pytest.raises(TypeError):
@decorators.command(5)
def foo():
pass
with pytest.raises(TypeError):
@decorators.command(('foo',))
def foo():
pass
def test_help():
# ensure help is a list with the line added
@decorators.help("This is a help line")
def foo():
pass
assert foo.help == ["This is a help line"]
# test the order of the help lines
@decorators.help("This is the first help line")
@decorators.help("This is the second help line")
def foo():
pass
assert foo.help == [
"This is the first help line",
"This is the second help line",
]
# only allow strings
with pytest.raises(TypeError):
@decorators.help(["This should raise an exception"])
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(5)
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(True)
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(('foo',))
def foo():
pass
|
Add first unit test using py.test
Tests Cardinal's decoratorsimport pytest
import decorators
def test_command():
# ensure commands is a list with foo added
@decorators.command('foo')
def foo():
pass
assert foo.commands == ['foo']
# test that you can pass a list
@decorators.command(['foo', 'bar'])
def foo():
pass
assert foo.commands == ['foo', 'bar']
# test that only one decorator can add commands
@decorators.command('foo')
@decorators.command('bar')
def foo():
pass
assert foo.commands == ['foo']
# only allow strings and lists
with pytest.raises(TypeError):
@decorators.command(True)
def foo():
pass
with pytest.raises(TypeError):
@decorators.command(5)
def foo():
pass
with pytest.raises(TypeError):
@decorators.command(('foo',))
def foo():
pass
def test_help():
# ensure help is a list with the line added
@decorators.help("This is a help line")
def foo():
pass
assert foo.help == ["This is a help line"]
# test the order of the help lines
@decorators.help("This is the first help line")
@decorators.help("This is the second help line")
def foo():
pass
assert foo.help == [
"This is the first help line",
"This is the second help line",
]
# only allow strings
with pytest.raises(TypeError):
@decorators.help(["This should raise an exception"])
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(5)
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(True)
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(('foo',))
def foo():
pass
|
<commit_before><commit_msg>Add first unit test using py.test
Tests Cardinal's decorators<commit_after>import pytest
import decorators
def test_command():
# ensure commands is a list with foo added
@decorators.command('foo')
def foo():
pass
assert foo.commands == ['foo']
# test that you can pass a list
@decorators.command(['foo', 'bar'])
def foo():
pass
assert foo.commands == ['foo', 'bar']
# test that only one decorator can add commands
@decorators.command('foo')
@decorators.command('bar')
def foo():
pass
assert foo.commands == ['foo']
# only allow strings and lists
with pytest.raises(TypeError):
@decorators.command(True)
def foo():
pass
with pytest.raises(TypeError):
@decorators.command(5)
def foo():
pass
with pytest.raises(TypeError):
@decorators.command(('foo',))
def foo():
pass
def test_help():
# ensure help is a list with the line added
@decorators.help("This is a help line")
def foo():
pass
assert foo.help == ["This is a help line"]
# test the order of the help lines
@decorators.help("This is the first help line")
@decorators.help("This is the second help line")
def foo():
pass
assert foo.help == [
"This is the first help line",
"This is the second help line",
]
# only allow strings
with pytest.raises(TypeError):
@decorators.help(["This should raise an exception"])
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(5)
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(True)
def foo():
pass
with pytest.raises(TypeError):
@decorators.help(('foo',))
def foo():
pass
|
|
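Aside: the decorators module under test is not shown. A minimal implementation consistent with the assertions above might look like the sketch below (the outermost @command wins, @help lines accumulate in reading order, anything other than a string or list raises TypeError); this is a guess, not Cardinal's actual code.

def command(triggers):
    if isinstance(triggers, str):
        triggers = [triggers]
    elif not isinstance(triggers, list):
        raise TypeError('@command expects a string or a list of strings')

    def decorator(func):
        # The outermost decorator is applied last and overwrites, which is why
        # stacking @command('foo') over @command('bar') leaves ['foo'].
        func.commands = triggers
        return func
    return decorator

def help(line):
    if not isinstance(line, str):
        raise TypeError('@help expects a string')

    def decorator(func):
        if not hasattr(func, 'help'):
            func.help = []
        # Outer decorators run last, so prepend to keep reading order.
        func.help.insert(0, line)
        return func
    return decorator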
4ba1ea670b208a734aa3f36c694b20e4bb5d6dda
|
alembic/versions/14ef1fe33bd_add_checkpoint_evaluation_table.py
|
alembic/versions/14ef1fe33bd_add_checkpoint_evaluation_table.py
|
"""Add checkpoint_evaluation table
Revision ID: 14ef1fe33bd
Revises: 3d0a468b38f
Create Date: 2015-07-10 02:35:06.655075
"""
# revision identifiers, used by Alembic.
revision = '14ef1fe33bd'
down_revision = '3d0a468b38f'
branch_labels = None
depends_on = None
from alembic import op
from bnd.models import CheckpointEvaluation
def upgrade():
# op.create_table(CheckpointEvaluation.__table__)
# FIXME: Temporary workaround
from bnd import create_app
from bnd.models import db
app = create_app(__name__)
with app.app_context():
db.create_all()
def downgrade():
op.drop_table(CheckpointEvaluation.__tablename__)
|
Write an Alembic migration script
|
Write an Alembic migration script
|
Python
|
mit
|
suminb/bnd,suminb/bnd,suminb/bnd
|
Write an Alembic migration script
|
"""Add checkpoint_evaluation table
Revision ID: 14ef1fe33bd
Revises: 3d0a468b38f
Create Date: 2015-07-10 02:35:06.655075
"""
# revision identifiers, used by Alembic.
revision = '14ef1fe33bd'
down_revision = '3d0a468b38f'
branch_labels = None
depends_on = None
from alembic import op
from bnd.models import CheckpointEvaluation
def upgrade():
# op.create_table(CheckpointEvaluation.__table__)
# FIXME: Temporary workaround
from bnd import create_app
from bnd.models import db
app = create_app(__name__)
with app.app_context():
db.create_all()
def downgrade():
op.drop_table(CheckpointEvaluation.__tablename__)
|
<commit_before><commit_msg>Write an Alembic migration script<commit_after>
|
"""Add checkpoint_evaluation table
Revision ID: 14ef1fe33bd
Revises: 3d0a468b38f
Create Date: 2015-07-10 02:35:06.655075
"""
# revision identifiers, used by Alembic.
revision = '14ef1fe33bd'
down_revision = '3d0a468b38f'
branch_labels = None
depends_on = None
from alembic import op
from bnd.models import CheckpointEvaluation
def upgrade():
# op.create_table(CheckpointEvaluation.__table__)
# FIXME: Temporary workaround
from bnd import create_app
from bnd.models import db
app = create_app(__name__)
with app.app_context():
db.create_all()
def downgrade():
op.drop_table(CheckpointEvaluation.__tablename__)
|
Write an Alembic migration script"""Add checkpoint_evaluation table
Revision ID: 14ef1fe33bd
Revises: 3d0a468b38f
Create Date: 2015-07-10 02:35:06.655075
"""
# revision identifiers, used by Alembic.
revision = '14ef1fe33bd'
down_revision = '3d0a468b38f'
branch_labels = None
depends_on = None
from alembic import op
from bnd.models import CheckpointEvaluation
def upgrade():
# op.create_table(CheckpointEvaluation.__table__)
# FIXME: Temporary workaround
from bnd import create_app
from bnd.models import db
app = create_app(__name__)
with app.app_context():
db.create_all()
def downgrade():
op.drop_table(CheckpointEvaluation.__tablename__)
|
<commit_before><commit_msg>Write an Alembic migration script<commit_after>"""Add checkpoint_evaluation table
Revision ID: 14ef1fe33bd
Revises: 3d0a468b38f
Create Date: 2015-07-10 02:35:06.655075
"""
# revision identifiers, used by Alembic.
revision = '14ef1fe33bd'
down_revision = '3d0a468b38f'
branch_labels = None
depends_on = None
from alembic import op
from bnd.models import CheckpointEvaluation
def upgrade():
# op.create_table(CheckpointEvaluation.__table__)
# FIXME: Temporary workaround
from bnd import create_app
from bnd.models import db
app = create_app(__name__)
with app.app_context():
db.create_all()
def downgrade():
op.drop_table(CheckpointEvaluation.__tablename__)
|
|
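Aside: the FIXME in this migration creates all tables through the application instead of declaring the new one in the migration itself. A more conventional Alembic upgrade would spell the columns out, roughly as below; the column names and types are invented for illustration and are not the real checkpoint_evaluation schema.

import sqlalchemy as sa
from alembic import op

def upgrade():
    op.create_table(
        'checkpoint_evaluation',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('checkpoint_id', sa.Integer, sa.ForeignKey('checkpoint.id')),
        sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')),
        sa.Column('score', sa.Integer),
    )

def downgrade():
    op.drop_table('checkpoint_evaluation')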
d0ac9b735694220f9d101358da1e51a335fad284
|
smap-nepse/preprocessing/l1_regularization.py
|
smap-nepse/preprocessing/l1_regularization.py
|
import numpy as np
import pandas as pd
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler
df = pd.read_csv("NABIL.csv")
df.drop(df.columns[[0,1,9,13,14]], axis=1, inplace=True)
#print(df.columns)
df.drop(df.index[:19],inplace=True)
scaler = StandardScaler()
#df['Price']=df['Closing Price'].shift(-1)
#df=df[:-1]
#Y = df['Price']
#df1 = df.drop('Price', axis=1)
X = scaler.fit_transform(df)
Y=df['Closing Price']
names = df.columns
lasso = Lasso(alpha=.3)
lasso.fit(X, Y)
lst = sorted(zip(lasso.coef_, names), key=lambda x: x[0], reverse=True)
print(lst)
|
Select features using l1 regularization
|
Select features using l1 regularization
|
Python
|
mit
|
samshara/Stock-Market-Analysis-and-Prediction
|
Select features using l1 regularization
|
import numpy as np
import pandas as pd
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler
df = pd.read_csv("NABIL.csv")
df.drop(df.columns[[0,1,9,13,14]], axis=1, inplace=True)
#print(df.columns)
df.drop(df.index[:19],inplace=True)
scaler = StandardScaler()
#df['Price']=df['Closing Price'].shift(-1)
#df=df[:-1]
#Y = df['Price']
#df1 = df.drop('Price', axis=1)
X = scaler.fit_transform(df)
Y=df['Closing Price']
names = df.columns
lasso = Lasso(alpha=.3)
lasso.fit(X, Y)
lst = sorted(zip(lasso.coef_, names), key=lambda x: x[0], reverse=True)
print(lst)
|
<commit_before><commit_msg>Select features using l1 regularization<commit_after>
|
import numpy as np
import pandas as pd
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler
df = pd.read_csv("NABIL.csv")
df.drop(df.columns[[0,1,9,13,14]], axis=1, inplace=True)
#print(df.columns)
df.drop(df.index[:19],inplace=True)
scaler = StandardScaler()
#df['Price']=df['Closing Price'].shift(-1)
#df=df[:-1]
#Y = df['Price']
#df1 = df.drop('Price', axis=1)
X = scaler.fit_transform(df)
Y=df['Closing Price']
names = df.columns
lasso = Lasso(alpha=.3)
lasso.fit(X, Y)
lst = sorted(zip(lasso.coef_, names), key=lambda x: x[0], reverse=True)
print(lst)
|
Select features using l1 regularizationimport numpy as np
import pandas as pd
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler
df = pd.read_csv("NABIL.csv")
df.drop(df.columns[[0,1,9,13,14]], axis=1, inplace=True)
#print(df.columns)
df.drop(df.index[:19],inplace=True)
scaler = StandardScaler()
#df['Price']=df['Closing Price'].shift(-1)
#df=df[:-1]
#Y = df['Price']
#df1 = df.drop('Price', axis=1)
X = scaler.fit_transform(df)
Y=df['Closing Price']
names = df.columns
lasso = Lasso(alpha=.3)
lasso.fit(X, Y)
lst = sorted(zip(lasso.coef_, names), key=lambda x: x[0], reverse=True)
print(lst)
|
<commit_before><commit_msg>Select features using l1 regularization<commit_after>import numpy as np
import pandas as pd
from sklearn.linear_model import Lasso
from sklearn.preprocessing import StandardScaler
df = pd.read_csv("NABIL.csv")
df.drop(df.columns[[0,1,9,13,14]], axis=1, inplace=True)
#print(df.columns)
df.drop(df.index[:19],inplace=True)
scaler = StandardScaler()
#df['Price']=df['Closing Price'].shift(-1)
#df=df[:-1]
#Y = df['Price']
#df1 = df.drop('Price', axis=1)
X = scaler.fit_transform(df)
Y=df['Closing Price']
names = df.columns
lasso = Lasso(alpha=.3)
lasso.fit(X, Y)
lst = sorted(zip(lasso.coef_, names), key=lambda x: x[0], reverse=True)
print(lst)
|
|
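Aside: once the Lasso is fit, feature selection amounts to keeping the columns with non-zero coefficients. One way to do that with scikit-learn, assuming X, Y and names as prepared in the script above, is:

from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Lasso

selector = SelectFromModel(Lasso(alpha=.3))
X_selected = selector.fit_transform(X, Y)   # keeps only non-zero-coefficient columns
print(list(names[selector.get_support()]))  # names of the retained features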
899d432b87397368f53010493a3cd67506bbe5a5
|
hooks/post_gen_project.py
|
hooks/post_gen_project.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('post_gen_project')
import shutil
import os
{% if cookiecutter.docs_tool == "mkdocs" %}
logger.info('Moving files for mkdocs.')
os.rename('mkdocs/mkdocs.yml', 'mkdocs.yml')
shutil.move('mkdocs', 'docs')
{% endif %}
|
Implement a post gen hook to rename mkdocs files
|
Implement a post gen hook to rename mkdocs files
|
Python
|
mit
|
pytest-dev/cookiecutter-pytest-plugin,luzfcb/cookiecutter-pytest-plugin,s0undt3ch/cookiecutter-pytest-plugin
|
Implement a post gen hook to rename mkdocs files
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('post_gen_project')
import shutil
import os
{% if cookiecutter.docs_tool == "mkdocs" %}
logger.info('Moving files for mkdocs.')
os.rename('mkdocs/mkdocs.yml', 'mkdocs.yml')
shutil.move('mkdocs', 'docs')
{% endif %}
|
<commit_before><commit_msg>Implement a post gen hook to rename mkdocs files<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('post_gen_project')
import shutil
import os
{% if cookiecutter.docs_tool == "mkdocs" %}
logger.info('Moving files for mkdocs.')
os.rename('mkdocs/mkdocs.yml', 'mkdocs.yml')
shutil.move('mkdocs', 'docs')
{% endif %}
|
Implement a post gen hook to rename mkdocs files#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('post_gen_project')
import shutil
import os
{% if cookiecutter.docs_tool == "mkdocs" %}
logger.info('Moving files for mkdocs.')
os.rename('mkdocs/mkdocs.yml', 'mkdocs.yml')
shutil.move('mkdocs', 'docs')
{% endif %}
|
<commit_before><commit_msg>Implement a post gen hook to rename mkdocs files<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('post_gen_project')
import shutil
import os
{% if cookiecutter.docs_tool == "mkdocs" %}
logger.info('Moving files for mkdocs.')
os.rename('mkdocs/mkdocs.yml', 'mkdocs.yml')
shutil.move('mkdocs', 'docs')
{% endif %}
|
|
0b8dc2afb3b3b9b9988e04586ca6c732cd4c7b98
|
test/misc/test_commands.py
|
test/misc/test_commands.py
|
import json
import datetime
from simplekv import fs
from test.base import ApiTestCase
from zou.app.utils import commands
def totimestamp(dt, epoch=datetime.datetime(1970, 1, 1)):
td = dt - epoch
return (td.microseconds + (td.seconds + td.days * 86400) * 10**6) / 10**6
class CommandsTestCase(ApiTestCase):
def setUp(self):
super(CommandsTestCase, self).setUp()
self.store = fs.FilesystemStore(
self.flask_app.config["JWT_TOKEN_FOLDER"]
)
for key in self.store.keys():
self.store.delete(key)
def test_clean_auth_tokens_revoked(self):
now = datetime.datetime.now()
self.store.put("testkey", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.store.put("testkey2", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": True
}).encode("utf-8"))
self.assertEquals(len(self.store.keys()), 2)
commands.clean_auth_tokens()
self.assertEquals(len(self.store.keys()), 1)
self.assertEquals(self.store.keys()[0], "testkey")
def test_clean_auth_tokens_expired(self):
now = datetime.datetime.now()
self.store.put("testkey", json.dumps({
"token": {
"exp": totimestamp(now - datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.store.put("testkey2", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.assertEquals(len(self.store.keys()), 2)
commands.clean_auth_tokens()
self.assertEquals(len(self.store.keys()), 1)
self.assertEquals(self.store.keys()[0], "testkey2")
|
Add tests for commands module
|
Add tests for commands module
|
Python
|
agpl-3.0
|
cgwire/zou
|
Add tests for commands module
|
import json
import datetime
from simplekv import fs
from test.base import ApiTestCase
from zou.app.utils import commands
def totimestamp(dt, epoch=datetime.datetime(1970, 1, 1)):
td = dt - epoch
return (td.microseconds + (td.seconds + td.days * 86400) * 10**6) / 10**6
class CommandsTestCase(ApiTestCase):
def setUp(self):
super(CommandsTestCase, self).setUp()
self.store = fs.FilesystemStore(
self.flask_app.config["JWT_TOKEN_FOLDER"]
)
for key in self.store.keys():
self.store.delete(key)
def test_clean_auth_tokens_revoked(self):
now = datetime.datetime.now()
self.store.put("testkey", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.store.put("testkey2", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": True
}).encode("utf-8"))
self.assertEquals(len(self.store.keys()), 2)
commands.clean_auth_tokens()
self.assertEquals(len(self.store.keys()), 1)
self.assertEquals(self.store.keys()[0], "testkey")
def test_clean_auth_tokens_expired(self):
now = datetime.datetime.now()
self.store.put("testkey", json.dumps({
"token": {
"exp": totimestamp(now - datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.store.put("testkey2", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.assertEquals(len(self.store.keys()), 2)
commands.clean_auth_tokens()
self.assertEquals(len(self.store.keys()), 1)
self.assertEquals(self.store.keys()[0], "testkey2")
|
<commit_before><commit_msg>Add tests for commands module<commit_after>
|
import json
import datetime
from simplekv import fs
from test.base import ApiTestCase
from zou.app.utils import commands
def totimestamp(dt, epoch=datetime.datetime(1970, 1, 1)):
td = dt - epoch
return (td.microseconds + (td.seconds + td.days * 86400) * 10**6) / 10**6
class CommandsTestCase(ApiTestCase):
def setUp(self):
super(CommandsTestCase, self).setUp()
self.store = fs.FilesystemStore(
self.flask_app.config["JWT_TOKEN_FOLDER"]
)
for key in self.store.keys():
self.store.delete(key)
def test_clean_auth_tokens_revoked(self):
now = datetime.datetime.now()
self.store.put("testkey", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.store.put("testkey2", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": True
}).encode("utf-8"))
self.assertEquals(len(self.store.keys()), 2)
commands.clean_auth_tokens()
self.assertEquals(len(self.store.keys()), 1)
self.assertEquals(self.store.keys()[0], "testkey")
def test_clean_auth_tokens_expired(self):
now = datetime.datetime.now()
self.store.put("testkey", json.dumps({
"token": {
"exp": totimestamp(now - datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.store.put("testkey2", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.assertEquals(len(self.store.keys()), 2)
commands.clean_auth_tokens()
self.assertEquals(len(self.store.keys()), 1)
self.assertEquals(self.store.keys()[0], "testkey2")
|
Add tests for commands moduleimport json
import datetime
from simplekv import fs
from test.base import ApiTestCase
from zou.app.utils import commands
def totimestamp(dt, epoch=datetime.datetime(1970, 1, 1)):
td = dt - epoch
return (td.microseconds + (td.seconds + td.days * 86400) * 10**6) / 10**6
class CommandsTestCase(ApiTestCase):
def setUp(self):
super(CommandsTestCase, self).setUp()
self.store = fs.FilesystemStore(
self.flask_app.config["JWT_TOKEN_FOLDER"]
)
for key in self.store.keys():
self.store.delete(key)
def test_clean_auth_tokens_revoked(self):
now = datetime.datetime.now()
self.store.put("testkey", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.store.put("testkey2", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": True
}).encode("utf-8"))
self.assertEquals(len(self.store.keys()), 2)
commands.clean_auth_tokens()
self.assertEquals(len(self.store.keys()), 1)
self.assertEquals(self.store.keys()[0], "testkey")
def test_clean_auth_tokens_expired(self):
now = datetime.datetime.now()
self.store.put("testkey", json.dumps({
"token": {
"exp": totimestamp(now - datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.store.put("testkey2", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.assertEquals(len(self.store.keys()), 2)
commands.clean_auth_tokens()
self.assertEquals(len(self.store.keys()), 1)
self.assertEquals(self.store.keys()[0], "testkey2")
|
<commit_before><commit_msg>Add tests for commands module<commit_after>import json
import datetime
from simplekv import fs
from test.base import ApiTestCase
from zou.app.utils import commands
def totimestamp(dt, epoch=datetime.datetime(1970, 1, 1)):
td = dt - epoch
return (td.microseconds + (td.seconds + td.days * 86400) * 10**6) / 10**6
class CommandsTestCase(ApiTestCase):
def setUp(self):
super(CommandsTestCase, self).setUp()
self.store = fs.FilesystemStore(
self.flask_app.config["JWT_TOKEN_FOLDER"]
)
for key in self.store.keys():
self.store.delete(key)
def test_clean_auth_tokens_revoked(self):
now = datetime.datetime.now()
self.store.put("testkey", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.store.put("testkey2", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": True
}).encode("utf-8"))
self.assertEquals(len(self.store.keys()), 2)
commands.clean_auth_tokens()
self.assertEquals(len(self.store.keys()), 1)
self.assertEquals(self.store.keys()[0], "testkey")
def test_clean_auth_tokens_expired(self):
now = datetime.datetime.now()
self.store.put("testkey", json.dumps({
"token": {
"exp": totimestamp(now - datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.store.put("testkey2", json.dumps({
"token": {
"exp": totimestamp(now + datetime.timedelta(days=8))
},
"revoked": False
}).encode("utf-8"))
self.assertEquals(len(self.store.keys()), 2)
commands.clean_auth_tokens()
self.assertEquals(len(self.store.keys()), 1)
self.assertEquals(self.store.keys()[0], "testkey2")
|
|
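Aside: commands.clean_auth_tokens itself is not shown. A minimal version consistent with these tests would drop every stored token that is revoked or already expired; the sketch below is a guess at that behaviour (it takes the store explicitly and reuses the totimestamp helper defined in the test), not the real zou implementation.

import json
import datetime

def clean_auth_tokens(store):
    now = totimestamp(datetime.datetime.now())  # totimestamp as in the test above
    for key in list(store.keys()):
        entry = json.loads(store.get(key).decode('utf-8'))
        if entry['revoked'] or entry['token']['exp'] < now:
            store.delete(key)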
71723aecd5466dc7ad9bd27b2f1d2d267e695ff5
|
tests/test_kernel_execution.py
|
tests/test_kernel_execution.py
|
from parcels import Grid, ScipyParticle, JITParticle, KernelOp
import numpy as np
import pytest
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
def DoNothing(particle, grid, time, dt):
return KernelOp.SUCCESS
@pytest.fixture
def grid(xdim=20, ydim=20):
""" Standard unit mesh grid """
lon = np.linspace(0., 1., xdim, dtype=np.float32)
lat = np.linspace(0., 1., ydim, dtype=np.float32)
U, V = np.meshgrid(lat, lon)
return Grid.from_data(np.array(U, dtype=np.float32), lon, lat,
np.array(V, dtype=np.float32), lon, lat,
mesh='flat')
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('start, end, substeps, dt', [
(0., 10., 1, 1.),
(0., 10., 4, 1.),
(0., 10., 1, 3.),
(2., 16., 5, 3.),
(20., 10., 4, -1.),
(20., -10., 7, -2.),
])
def test_execution_endtime(grid, mode, start, end, substeps, dt, npart=10):
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
pset.execute(DoNothing, starttime=start, endtime=end, dt=dt)
assert np.allclose(np.array([p.time for p in pset]), end)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('start, end, substeps, dt', [
(0., 10., 1, 1.),
(0., 10., 4, 1.),
(0., 10., 1, 3.),
(2., 16., 5, 3.),
(20., 10., 4, -1.),
(20., -10., 7, -2.),
])
def test_execution_runtime(grid, mode, start, end, substeps, dt, npart=10):
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
t_step = (end - start) / substeps
for _ in range(substeps):
pset.execute(DoNothing, starttime=start, runtime=t_step, dt=dt)
start += t_step
assert np.allclose(np.array([p.time for p in pset]), end)
|
Add test to verify timestepping and endtimes after execute
|
Particle: Add test to verify timestepping and endtimes after execute
|
Python
|
mit
|
OceanPARCELS/parcels,OceanPARCELS/parcels
|
Particle: Add test to verify timestepping and endtimes after execute
|
from parcels import Grid, ScipyParticle, JITParticle, KernelOp
import numpy as np
import pytest
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
def DoNothing(particle, grid, time, dt):
return KernelOp.SUCCESS
@pytest.fixture
def grid(xdim=20, ydim=20):
""" Standard unit mesh grid """
lon = np.linspace(0., 1., xdim, dtype=np.float32)
lat = np.linspace(0., 1., ydim, dtype=np.float32)
U, V = np.meshgrid(lat, lon)
return Grid.from_data(np.array(U, dtype=np.float32), lon, lat,
np.array(V, dtype=np.float32), lon, lat,
mesh='flat')
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('start, end, substeps, dt', [
(0., 10., 1, 1.),
(0., 10., 4, 1.),
(0., 10., 1, 3.),
(2., 16., 5, 3.),
(20., 10., 4, -1.),
(20., -10., 7, -2.),
])
def test_execution_endtime(grid, mode, start, end, substeps, dt, npart=10):
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
pset.execute(DoNothing, starttime=start, endtime=end, dt=dt)
assert np.allclose(np.array([p.time for p in pset]), end)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('start, end, substeps, dt', [
(0., 10., 1, 1.),
(0., 10., 4, 1.),
(0., 10., 1, 3.),
(2., 16., 5, 3.),
(20., 10., 4, -1.),
(20., -10., 7, -2.),
])
def test_execution_runtime(grid, mode, start, end, substeps, dt, npart=10):
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
t_step = (end - start) / substeps
for _ in range(substeps):
pset.execute(DoNothing, starttime=start, runtime=t_step, dt=dt)
start += t_step
assert np.allclose(np.array([p.time for p in pset]), end)
|
<commit_before><commit_msg>Particle: Add test to verify timestepping and endtimes after execute<commit_after>
|
from parcels import Grid, ScipyParticle, JITParticle, KernelOp
import numpy as np
import pytest
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
def DoNothing(particle, grid, time, dt):
return KernelOp.SUCCESS
@pytest.fixture
def grid(xdim=20, ydim=20):
""" Standard unit mesh grid """
lon = np.linspace(0., 1., xdim, dtype=np.float32)
lat = np.linspace(0., 1., ydim, dtype=np.float32)
U, V = np.meshgrid(lat, lon)
return Grid.from_data(np.array(U, dtype=np.float32), lon, lat,
np.array(V, dtype=np.float32), lon, lat,
mesh='flat')
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('start, end, substeps, dt', [
(0., 10., 1, 1.),
(0., 10., 4, 1.),
(0., 10., 1, 3.),
(2., 16., 5, 3.),
(20., 10., 4, -1.),
(20., -10., 7, -2.),
])
def test_execution_endtime(grid, mode, start, end, substeps, dt, npart=10):
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
pset.execute(DoNothing, starttime=start, endtime=end, dt=dt)
assert np.allclose(np.array([p.time for p in pset]), end)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('start, end, substeps, dt', [
(0., 10., 1, 1.),
(0., 10., 4, 1.),
(0., 10., 1, 3.),
(2., 16., 5, 3.),
(20., 10., 4, -1.),
(20., -10., 7, -2.),
])
def test_execution_runtime(grid, mode, start, end, substeps, dt, npart=10):
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
t_step = (end - start) / substeps
for _ in range(substeps):
pset.execute(DoNothing, starttime=start, runtime=t_step, dt=dt)
start += t_step
assert np.allclose(np.array([p.time for p in pset]), end)
|
Particle: Add test to verify timestepping and endtimes after executefrom parcels import Grid, ScipyParticle, JITParticle, KernelOp
import numpy as np
import pytest
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
def DoNothing(particle, grid, time, dt):
return KernelOp.SUCCESS
@pytest.fixture
def grid(xdim=20, ydim=20):
""" Standard unit mesh grid """
lon = np.linspace(0., 1., xdim, dtype=np.float32)
lat = np.linspace(0., 1., ydim, dtype=np.float32)
U, V = np.meshgrid(lat, lon)
return Grid.from_data(np.array(U, dtype=np.float32), lon, lat,
np.array(V, dtype=np.float32), lon, lat,
mesh='flat')
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('start, end, substeps, dt', [
(0., 10., 1, 1.),
(0., 10., 4, 1.),
(0., 10., 1, 3.),
(2., 16., 5, 3.),
(20., 10., 4, -1.),
(20., -10., 7, -2.),
])
def test_execution_endtime(grid, mode, start, end, substeps, dt, npart=10):
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
pset.execute(DoNothing, starttime=start, endtime=end, dt=dt)
assert np.allclose(np.array([p.time for p in pset]), end)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('start, end, substeps, dt', [
(0., 10., 1, 1.),
(0., 10., 4, 1.),
(0., 10., 1, 3.),
(2., 16., 5, 3.),
(20., 10., 4, -1.),
(20., -10., 7, -2.),
])
def test_execution_runtime(grid, mode, start, end, substeps, dt, npart=10):
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
t_step = (end - start) / substeps
for _ in range(substeps):
pset.execute(DoNothing, starttime=start, runtime=t_step, dt=dt)
start += t_step
assert np.allclose(np.array([p.time for p in pset]), end)
|
<commit_before><commit_msg>Particle: Add test to verify timestepping and endtimes after execute<commit_after>from parcels import Grid, ScipyParticle, JITParticle, KernelOp
import numpy as np
import pytest
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
def DoNothing(particle, grid, time, dt):
return KernelOp.SUCCESS
@pytest.fixture
def grid(xdim=20, ydim=20):
""" Standard unit mesh grid """
lon = np.linspace(0., 1., xdim, dtype=np.float32)
lat = np.linspace(0., 1., ydim, dtype=np.float32)
U, V = np.meshgrid(lat, lon)
return Grid.from_data(np.array(U, dtype=np.float32), lon, lat,
np.array(V, dtype=np.float32), lon, lat,
mesh='flat')
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('start, end, substeps, dt', [
(0., 10., 1, 1.),
(0., 10., 4, 1.),
(0., 10., 1, 3.),
(2., 16., 5, 3.),
(20., 10., 4, -1.),
(20., -10., 7, -2.),
])
def test_execution_endtime(grid, mode, start, end, substeps, dt, npart=10):
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
pset.execute(DoNothing, starttime=start, endtime=end, dt=dt)
assert np.allclose(np.array([p.time for p in pset]), end)
@pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('start, end, substeps, dt', [
(0., 10., 1, 1.),
(0., 10., 4, 1.),
(0., 10., 1, 3.),
(2., 16., 5, 3.),
(20., 10., 4, -1.),
(20., -10., 7, -2.),
])
def test_execution_runtime(grid, mode, start, end, substeps, dt, npart=10):
pset = grid.ParticleSet(npart, pclass=ptype[mode],
lon=np.linspace(0, 1, npart, dtype=np.float32),
lat=np.linspace(1, 0, npart, dtype=np.float32))
t_step = (end - start) / substeps
for _ in range(substeps):
pset.execute(DoNothing, starttime=start, runtime=t_step, dt=dt)
start += t_step
assert np.allclose(np.array([p.time for p in pset]), end)
|
|
ad9f701c9e601a0678ee92e980a15bdbd8d18828
|
src/metamodels/nig_normal.py
|
src/metamodels/nig_normal.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A model that posts that all columns are independently Gaussian with
unknown parameters.
The parameters are taken from the normal and inverse-gamma conjuate
prior.
This module implements the :class:`bayeslite.IBayesDBMetamodel`
interface for the NIG-Normal model.
"""
import math
import random
import bayeslite.metamodel as metamodel
class NIGNormalMetamodel(metamodel.IBayesDBMetamodel):
"""Normal-Inverse-Gamma-Normal metamodel for BayesDB.
The metamodel is named ``nig_normal`` in BQL::
CREATE GENERATOR t_nig FOR t USING nig_normal(..)
"""
def __init__(self, seed=0):
self.prng = random.Random(seed)
def name(self): return 'nig_normal'
def register(self, bdb):
bdb.sql_execute("INSERT INTO bayesdb_metamodel (name, version) VALUES ('nig_normal', 1)")
def create_generator(self, bdb, table, schema, instantiate):
instantiate(schema)
def drop_generator(self, *args): pass
def rename_column(self, *args): pass
def initialize_models(self, *args): pass
def drop_models(self, *args): pass
def analyze_models(self, *args): pass
def simulate_joint(self, _bdb, _generator_id, targets, _constraints):
return [self.prng.gauss(0, 1) for _ in targets]
def logpdf(self, _bdb, _generator_id, targets, _constraints):
return sum(logpdfOne(value, 0, 1) for (_, _, value) in targets)
def insert(self, *args): pass
def remove(self, *args): pass
def infer(self, *args): pass
HALF_LOG2PI = 0.5 * math.log(2 * math.pi)
def logpdfOne(x, mu, sigma):
deviation = x - mu
return - math.log(sigma) - HALF_LOG2PI \
- (0.5 * deviation * deviation / (sigma * sigma))
|
Add a starting skeleton for NIG-Normal, by copying IID-Normal, mutatis mutandis.
|
Add a starting skeleton for NIG-Normal, by copying IID-Normal, mutatis mutandis.
|
Python
|
apache-2.0
|
probcomp/bayeslite,probcomp/bayeslite
|
Add a starting skeleton for NIG-Normal, by copying IID-Normal, mutatis mutandis.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A model that posts that all columns are independently Gaussian with
unknown parameters.
The parameters are taken from the normal and inverse-gamma conjuate
prior.
This module implements the :class:`bayeslite.IBayesDBMetamodel`
interface for the NIG-Normal model.
"""
import math
import random
import bayeslite.metamodel as metamodel
class NIGNormalMetamodel(metamodel.IBayesDBMetamodel):
"""Normal-Inverse-Gamma-Normal metamodel for BayesDB.
The metamodel is named ``nig_normal`` in BQL::
CREATE GENERATOR t_nig FOR t USING nig_normal(..)
"""
def __init__(self, seed=0):
self.prng = random.Random(seed)
def name(self): return 'nig_normal'
def register(self, bdb):
bdb.sql_execute("INSERT INTO bayesdb_metamodel (name, version) VALUES ('nig_normal', 1)")
def create_generator(self, bdb, table, schema, instantiate):
instantiate(schema)
def drop_generator(self, *args): pass
def rename_column(self, *args): pass
def initialize_models(self, *args): pass
def drop_models(self, *args): pass
def analyze_models(self, *args): pass
def simulate_joint(self, _bdb, _generator_id, targets, _constraints):
return [self.prng.gauss(0, 1) for _ in targets]
def logpdf(self, _bdb, _generator_id, targets, _constraints):
return sum(logpdfOne(value, 0, 1) for (_, _, value) in targets)
def insert(self, *args): pass
def remove(self, *args): pass
def infer(self, *args): pass
HALF_LOG2PI = 0.5 * math.log(2 * math.pi)
def logpdfOne(x, mu, sigma):
deviation = x - mu
return - math.log(sigma) - HALF_LOG2PI \
- (0.5 * deviation * deviation / (sigma * sigma))
|
<commit_before><commit_msg>Add a starting skeleton for NIG-Normal, by copying IID-Normal, mutatis mutandis.<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A model that posts that all columns are independently Gaussian with
unknown parameters.
The parameters are taken from the normal and inverse-gamma conjuate
prior.
This module implements the :class:`bayeslite.IBayesDBMetamodel`
interface for the NIG-Normal model.
"""
import math
import random
import bayeslite.metamodel as metamodel
class NIGNormalMetamodel(metamodel.IBayesDBMetamodel):
"""Normal-Inverse-Gamma-Normal metamodel for BayesDB.
The metamodel is named ``nig_normal`` in BQL::
CREATE GENERATOR t_nig FOR t USING nig_normal(..)
"""
def __init__(self, seed=0):
self.prng = random.Random(seed)
def name(self): return 'nig_normal'
def register(self, bdb):
bdb.sql_execute("INSERT INTO bayesdb_metamodel (name, version) VALUES ('nig_normal', 1)")
def create_generator(self, bdb, table, schema, instantiate):
instantiate(schema)
def drop_generator(self, *args): pass
def rename_column(self, *args): pass
def initialize_models(self, *args): pass
def drop_models(self, *args): pass
def analyze_models(self, *args): pass
def simulate_joint(self, _bdb, _generator_id, targets, _constraints):
return [self.prng.gauss(0, 1) for _ in targets]
def logpdf(self, _bdb, _generator_id, targets, _constraints):
return sum(logpdfOne(value, 0, 1) for (_, _, value) in targets)
def insert(self, *args): pass
def remove(self, *args): pass
def infer(self, *args): pass
HALF_LOG2PI = 0.5 * math.log(2 * math.pi)
def logpdfOne(x, mu, sigma):
deviation = x - mu
return - math.log(sigma) - HALF_LOG2PI \
- (0.5 * deviation * deviation / (sigma * sigma))
|
Add a starting skeleton for NIG-Normal, by copying IID-Normal, mutatis mutandis.# -*- coding: utf-8 -*-
# Copyright (c) 2015, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A model that posts that all columns are independently Gaussian with
unknown parameters.
The parameters are taken from the normal and inverse-gamma conjuate
prior.
This module implements the :class:`bayeslite.IBayesDBMetamodel`
interface for the NIG-Normal model.
"""
import math
import random
import bayeslite.metamodel as metamodel
class NIGNormalMetamodel(metamodel.IBayesDBMetamodel):
"""Normal-Inverse-Gamma-Normal metamodel for BayesDB.
The metamodel is named ``nig_normal`` in BQL::
CREATE GENERATOR t_nig FOR t USING nig_normal(..)
"""
def __init__(self, seed=0):
self.prng = random.Random(seed)
def name(self): return 'nig_normal'
def register(self, bdb):
bdb.sql_execute("INSERT INTO bayesdb_metamodel (name, version) VALUES ('nig_normal', 1)")
def create_generator(self, bdb, table, schema, instantiate):
instantiate(schema)
def drop_generator(self, *args): pass
def rename_column(self, *args): pass
def initialize_models(self, *args): pass
def drop_models(self, *args): pass
def analyze_models(self, *args): pass
def simulate_joint(self, _bdb, _generator_id, targets, _constraints):
return [self.prng.gauss(0, 1) for _ in targets]
def logpdf(self, _bdb, _generator_id, targets, _constraints):
return sum(logpdfOne(value, 0, 1) for (_, _, value) in targets)
def insert(self, *args): pass
def remove(self, *args): pass
def infer(self, *args): pass
HALF_LOG2PI = 0.5 * math.log(2 * math.pi)
def logpdfOne(x, mu, sigma):
deviation = x - mu
return - math.log(sigma) - HALF_LOG2PI \
- (0.5 * deviation * deviation / (sigma * sigma))
|
<commit_before><commit_msg>Add a starting skeleton for NIG-Normal, by copying IID-Normal, mutatis mutandis.<commit_after># -*- coding: utf-8 -*-
# Copyright (c) 2015, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A model that posts that all columns are independently Gaussian with
unknown parameters.
The parameters are taken from the normal and inverse-gamma conjuate
prior.
This module implements the :class:`bayeslite.IBayesDBMetamodel`
interface for the NIG-Normal model.
"""
import math
import random
import bayeslite.metamodel as metamodel
class NIGNormalMetamodel(metamodel.IBayesDBMetamodel):
"""Normal-Inverse-Gamma-Normal metamodel for BayesDB.
The metamodel is named ``nig_normal`` in BQL::
CREATE GENERATOR t_nig FOR t USING nig_normal(..)
"""
def __init__(self, seed=0):
self.prng = random.Random(seed)
def name(self): return 'nig_normal'
def register(self, bdb):
bdb.sql_execute("INSERT INTO bayesdb_metamodel (name, version) VALUES ('nig_normal', 1)")
def create_generator(self, bdb, table, schema, instantiate):
instantiate(schema)
def drop_generator(self, *args): pass
def rename_column(self, *args): pass
def initialize_models(self, *args): pass
def drop_models(self, *args): pass
def analyze_models(self, *args): pass
def simulate_joint(self, _bdb, _generator_id, targets, _constraints):
return [self.prng.gauss(0, 1) for _ in targets]
def logpdf(self, _bdb, _generator_id, targets, _constraints):
return sum(logpdfOne(value, 0, 1) for (_, _, value) in targets)
def insert(self, *args): pass
def remove(self, *args): pass
def infer(self, *args): pass
HALF_LOG2PI = 0.5 * math.log(2 * math.pi)
def logpdfOne(x, mu, sigma):
deviation = x - mu
return - math.log(sigma) - HALF_LOG2PI \
- (0.5 * deviation * deviation / (sigma * sigma))
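The docstring above promises a normal and inverse-gamma conjugate prior, but this first skeleton still simulates and scores against a fixed standard normal. For orientation only, a minimal sketch of the conjugate hyperparameter update a full implementation would need; the function name and the (m, kappa, alpha, beta) parameterization are illustrative assumptions, not part of the commit:
def nig_posterior(m, kappa, alpha, beta, data):
    # Textbook conjugate update for i.i.d. Gaussian observations with unknown
    # mean and variance, under the prior
    #   mu | sigma^2 ~ Normal(m, sigma^2 / kappa),  sigma^2 ~ InverseGamma(alpha, beta).
    n = len(data)
    if n == 0:
        return m, kappa, alpha, beta
    xbar = sum(data) / float(n)
    ssd = sum((x - xbar) ** 2 for x in data)
    kappa_n = kappa + n
    m_n = (kappa * m + n * xbar) / kappa_n
    alpha_n = alpha + n / 2.0
    beta_n = beta + 0.5 * ssd + kappa * n * (xbar - m) ** 2 / (2.0 * kappa_n)
    return m_n, kappa_n, alpha_n, beta_n
The posterior predictive under these hyperparameters is a Student t, which is what logpdf and simulate_joint would eventually evaluate and draw from instead of the fixed gauss(0, 1).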
|
|
46c17b1aeba9b4928df0c9f0c9057e5bbaf77378
|
dp/lcs.py
|
dp/lcs.py
|
def lcslen(x, y):
# Allocate extra row and column for the empty sequence case.
# Extra row and column is appended to the end and exploit
# Python's ability of negative indices: x[-1] is the last elem.
c = [[0 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]
for i, xi in enumerate(x):
for j, yj in enumerate(y):
if xi == yj:
c[i][j] = 1 + c[i-1][j-1]
else:
c[i][j] = max(c[i][j-1], c[i-1][j])
return c
def backtrack(c, x, y, i, j):
if i == -1 or j == -1:
return ""
elif x[i] == y[j]:
return backtrack(c, x, y, i-1, j-1) + x[i]
else:
if c[i][j-1] > c[i-1][j]:
return backtrack(c, x, y, i, j-1)
else:
return backtrack(c, x, y, i-1, j)
def lcs(x, y):
c = lcslen(x,y)
return backtrack(c, x, y, len(x)-1, len(y)-1)
|
Add Longest Common Subsequence implementation
|
Add Longest Common Subsequence implementation
|
Python
|
mit
|
dzeban/cs,dzeban/cs,dzeban/cs,dzeban/cs
|
Add Longest Common Subsequence implementation
|
def lcslen(x, y):
# Allocate extra row and column for the empty sequence case.
# Extra row and column is appended to the end and exploit
# Python's ability of negative indices: x[-1] is the last elem.
c = [[0 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]
for i, xi in enumerate(x):
for j, yj in enumerate(y):
if xi == yj:
c[i][j] = 1 + c[i-1][j-1]
else:
c[i][j] = max(c[i][j-1], c[i-1][j])
return c
def backtrack(c, x, y, i, j):
if i == -1 or j == -1:
return ""
elif x[i] == y[j]:
return backtrack(c, x, y, i-1, j-1) + x[i]
else:
if c[i][j-1] > c[i-1][j]:
return backtrack(c, x, y, i, j-1)
else:
return backtrack(c, x, y, i-1, j)
def lcs(x, y):
c = lcslen(x,y)
return backtrack(c, x, y, len(x)-1, len(y)-1)
|
<commit_before><commit_msg>Add Longest Common Subsequence implementation<commit_after>
|
def lcslen(x, y):
# Allocate extra row and column for the empty sequence case.
# Extra row and column is appended to the end and exploit
# Python's ability of negative indices: x[-1] is the last elem.
c = [[0 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]
for i, xi in enumerate(x):
for j, yj in enumerate(y):
if xi == yj:
c[i][j] = 1 + c[i-1][j-1]
else:
c[i][j] = max(c[i][j-1], c[i-1][j])
return c
def backtrack(c, x, y, i, j):
if i == -1 or j == -1:
return ""
elif x[i] == y[j]:
return backtrack(c, x, y, i-1, j-1) + x[i]
else:
if c[i][j-1] > c[i-1][j]:
return backtrack(c, x, y, i, j-1)
else:
return backtrack(c, x, y, i-1, j)
def lcs(x, y):
c = lcslen(x,y)
return backtrack(c, x, y, len(x)-1, len(y)-1)
|
Add Longest Common Subsequence implementationdef lcslen(x, y):
# Allocate extra row and column for the empty sequence case.
# Extra row and column is appended to the end and exploit
# Python's ability of negative indices: x[-1] is the last elem.
c = [[0 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]
for i, xi in enumerate(x):
for j, yj in enumerate(y):
if xi == yj:
c[i][j] = 1 + c[i-1][j-1]
else:
c[i][j] = max(c[i][j-1], c[i-1][j])
return c
def backtrack(c, x, y, i, j):
if i == -1 or j == -1:
return ""
elif x[i] == y[j]:
return backtrack(c, x, y, i-1, j-1) + x[i]
else:
if c[i][j-1] > c[i-1][j]:
return backtrack(c, x, y, i, j-1)
else:
return backtrack(c, x, y, i-1, j)
def lcs(x, y):
c = lcslen(x,y)
return backtrack(c, x, y, len(x)-1, len(y)-1)
|
<commit_before><commit_msg>Add Longest Common Subsequence implementation<commit_after>def lcslen(x, y):
# Allocate extra row and column for the empty sequence case.
# Extra row and column is appended to the end and exploit
# Python's ability of negative indices: x[-1] is the last elem.
c = [[0 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]
for i, xi in enumerate(x):
for j, yj in enumerate(y):
if xi == yj:
c[i][j] = 1 + c[i-1][j-1]
else:
c[i][j] = max(c[i][j-1], c[i-1][j])
return c
def backtrack(c, x, y, i, j):
if i == -1 or j == -1:
return ""
elif x[i] == y[j]:
return backtrack(c, x, y, i-1, j-1) + x[i]
else:
if c[i][j-1] > c[i-1][j]:
return backtrack(c, x, y, i, j-1)
else:
return backtrack(c, x, y, i-1, j)
def lcs(x, y):
c = lcslen(x,y)
return backtrack(c, x, y, len(x)-1, len(y)-1)
|
|
db45d34db6e4c14be226f33a383a4034cc6f7048
|
lib/utils.py
|
lib/utils.py
|
import re
def find_memtions(content):
    regex = re.compile(r"@(?P<username>\w+)(\s|$)", re.I)
return [m.group("username") for m in regex.finditer(content)]
|
Add the method for finding names using regular expression matching.
|
Add the method for finding names using regular expression matching.
|
Python
|
mit
|
yiyangyi/cc98-tornado
|
Add the method for finding names using regular expression matching.
|
import re
def find_memtions(content):
    regex = re.compile(r"@(?P<username>\w+)(\s|$)", re.I)
return [m.group("username") for m in regex.finditer(content)]
|
<commit_before><commit_msg>Add the method for finding names using regular expression matching.<commit_after>
|
import re
def find_memtions(content):
    regex = re.compile(r"@(?P<username>\w+)(\s|$)", re.I)
return [m.group("username") for m in regex.finditer(content)]
|
Add the method for finding names using regular expression matching.import re
def find_memtions(content):
    regex = re.compile(r"@(?P<username>\w+)(\s|$)", re.I)
return [m.group("username") for m in regex.finditer(content)]
|
<commit_before><commit_msg>Add the method for finding names using regular expression matching.<commit_after>import re
def find_memtions(content):
    regex = re.compile(r"@(?P<username>\w+)(\s|$)", re.I)
return [m.group("username") for m in regex.finditer(content)]
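A small usage sketch of find_memtions; the sample string and the expected output are illustrative and assume the \w+ username pattern used above:
print(find_memtions("thanks @alice and @bob for the review"))
# ['alice', 'bob']  (a username is captured only when the mention is followed by whitespace or end of string)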
|